From 23f4834faab9c763998f62a7990b3fea0bc515e0 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Thu, 4 Jul 2024 09:40:08 +0000 Subject: [PATCH 001/406] Bump versions after 8.14.2 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 6 +++--- .buildkite/pipelines/periodic.yml | 10 +++++----- .ci/bwcVersions | 2 +- .ci/snapshotBwcVersions | 2 +- server/src/main/java/org/elasticsearch/Version.java | 1 + .../resources/org/elasticsearch/TransportVersions.csv | 1 + .../org/elasticsearch/index/IndexVersions.csv | 1 + 8 files changed, 14 insertions(+), 11 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 4124d4e550d11..89df398af0d1d 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -62,7 +62,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.23", "8.14.2", "8.15.0"] + BWC_VERSION: ["7.17.23", "8.14.3", "8.15.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 4217fc91bf0fd..173a46b6d600c 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -577,8 +577,8 @@ steps: env: BWC_VERSION: 8.13.4 - - label: "{{matrix.image}} / 8.14.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.2 + - label: "{{matrix.image}} / 8.14.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.3 timeout_in_minutes: 300 matrix: setup: @@ -592,7 +592,7 @@ steps: buildDirectory: /dev/shm/bk diskSizeGb: 250 env: - BWC_VERSION: 8.14.2 + BWC_VERSION: 8.14.3 - label: "{{matrix.image}} / 8.15.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 06e7ffbc8fb1c..86a61814cf84b 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -642,8 +642,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.14.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.14.2#bwcTest + - label: 8.14.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.14.3#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -653,7 +653,7 @@ steps: preemptible: true diskSizeGb: 250 env: - BWC_VERSION: 8.14.2 + BWC_VERSION: 8.14.3 retry: automatic: - exit_status: "-1" @@ -751,7 +751,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.23", "8.14.2", "8.15.0"] + BWC_VERSION: ["7.17.23", "8.14.3", "8.15.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -801,7 +801,7 @@ steps: - openjdk21 - openjdk22 - openjdk23 - BWC_VERSION: ["7.17.23", "8.14.2", "8.15.0"] + BWC_VERSION: ["7.17.23", "8.14.3", "8.15.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index bce556e9fc352..e3b5d9da1ef8a 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -31,5 +31,5 @@ BWC_VERSION: - "8.11.4" - "8.12.2" - "8.13.4" - - "8.14.2" + - "8.14.3" - "8.15.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 5fc4b6c072899..7a7426c3a90a7 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,4 @@ BWC_VERSION: - "7.17.23" - - 
"8.14.2" + - "8.14.3" - "8.15.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index b2c78453d9c75..9ff7650d19823 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -178,6 +178,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_14_0 = new Version(8_14_00_99); public static final Version V_8_14_1 = new Version(8_14_01_99); public static final Version V_8_14_2 = new Version(8_14_02_99); + public static final Version V_8_14_3 = new Version(8_14_03_99); public static final Version V_8_15_0 = new Version(8_15_00_99); public static final Version CURRENT = V_8_15_0; diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index ba1dab5589ee2..5f1972e30198a 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -123,3 +123,4 @@ 8.13.4,8595001 8.14.0,8636001 8.14.1,8636001 +8.14.2,8636001 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index b7ca55a2b2b0d..d1116ddf99ee7 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -123,3 +123,4 @@ 8.13.4,8503000 8.14.0,8505000 8.14.1,8505000 +8.14.2,8505000 From 4f0da32c31191fd3476b752cd63bec2b52a97860 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Thu, 4 Jul 2024 09:47:29 +0000 Subject: [PATCH 002/406] Prune changelogs after 8.14.2 release --- docs/changelog/106253.yaml | 6 ------ docs/changelog/109341.yaml | 5 ----- docs/changelog/109492.yaml | 5 ----- docs/changelog/109500.yaml | 5 ----- docs/changelog/109533.yaml | 5 ----- docs/changelog/109629.yaml | 5 ----- docs/changelog/109632.yaml | 5 ----- docs/changelog/109636.yaml | 5 ----- docs/changelog/109695.yaml | 5 ----- docs/changelog/109824.yaml | 6 ------ docs/changelog/110035.yaml | 5 ----- docs/changelog/110103.yaml | 5 ----- 12 files changed, 62 deletions(-) delete mode 100644 docs/changelog/106253.yaml delete mode 100644 docs/changelog/109341.yaml delete mode 100644 docs/changelog/109492.yaml delete mode 100644 docs/changelog/109500.yaml delete mode 100644 docs/changelog/109533.yaml delete mode 100644 docs/changelog/109629.yaml delete mode 100644 docs/changelog/109632.yaml delete mode 100644 docs/changelog/109636.yaml delete mode 100644 docs/changelog/109695.yaml delete mode 100644 docs/changelog/109824.yaml delete mode 100644 docs/changelog/110035.yaml delete mode 100644 docs/changelog/110103.yaml diff --git a/docs/changelog/106253.yaml b/docs/changelog/106253.yaml deleted file mode 100644 index b80cda37f63c7..0000000000000 --- a/docs/changelog/106253.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106253 -summary: Fix for from parameter when using `sub_searches` and rank -area: Ranking -type: bug -issues: - - 99011 diff --git a/docs/changelog/109341.yaml b/docs/changelog/109341.yaml deleted file mode 100644 index 0c1eaa98a8aa2..0000000000000 --- a/docs/changelog/109341.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109341 -summary: Re-define `index.mapper.dynamic` setting in 8.x for a better 7.x to 8.x upgrade if this setting is used. 
-area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/109492.yaml b/docs/changelog/109492.yaml deleted file mode 100644 index d4d1e83eb7786..0000000000000 --- a/docs/changelog/109492.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109492 -summary: Add hexstring support byte painless scorers -area: Search -type: bug -issues: [] diff --git a/docs/changelog/109500.yaml b/docs/changelog/109500.yaml deleted file mode 100644 index cfd6bc770d5d6..0000000000000 --- a/docs/changelog/109500.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109500 -summary: Guard file settings readiness on file settings support -area: Infra/Settings -type: bug -issues: [] diff --git a/docs/changelog/109533.yaml b/docs/changelog/109533.yaml deleted file mode 100644 index 5720410e5f370..0000000000000 --- a/docs/changelog/109533.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109533 -summary: Fix IndexOutOfBoundsException during inference -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/109629.yaml b/docs/changelog/109629.yaml deleted file mode 100644 index c468388117b72..0000000000000 --- a/docs/changelog/109629.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109629 -summary: "[Data streams] Fix the description of the lazy rollover task" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/109632.yaml b/docs/changelog/109632.yaml deleted file mode 100644 index 6b04160bbdbec..0000000000000 --- a/docs/changelog/109632.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109632 -summary: Force execute inactive sink reaper -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/109636.yaml b/docs/changelog/109636.yaml deleted file mode 100644 index f8f73a75dfd3d..0000000000000 --- a/docs/changelog/109636.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109636 -summary: "Ensure a lazy rollover request will rollover the target data stream once." -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/109695.yaml b/docs/changelog/109695.yaml deleted file mode 100644 index f922b76412676..0000000000000 --- a/docs/changelog/109695.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109695 -summary: Fix ESQL cancellation for exchange requests -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/109824.yaml b/docs/changelog/109824.yaml deleted file mode 100644 index 987e8c0a8b1a2..0000000000000 --- a/docs/changelog/109824.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109824 -summary: Check array size before returning array item in script doc values -area: Infra/Scripting -type: bug -issues: - - 104998 diff --git a/docs/changelog/110035.yaml b/docs/changelog/110035.yaml deleted file mode 100644 index 670c58240d835..0000000000000 --- a/docs/changelog/110035.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110035 -summary: Fix equals and hashcode for `SingleValueQuery.LuceneQuery` -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/110103.yaml b/docs/changelog/110103.yaml deleted file mode 100644 index 9f613ec2b446e..0000000000000 --- a/docs/changelog/110103.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110103 -summary: Fix automatic tracking of collapse with `docvalue_fields` -area: Search -type: bug -issues: [] From 9e9033b40f99ae276d12f52edad8346d5baf8a47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 4 Jul 2024 15:53:40 +0200 Subject: [PATCH 003/406] [DOCS] Adds 8.14.2 release notes to main. 
(#110471) (#110472) --- docs/reference/release-notes/8.14.2.asciidoc | 38 ++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 docs/reference/release-notes/8.14.2.asciidoc diff --git a/docs/reference/release-notes/8.14.2.asciidoc b/docs/reference/release-notes/8.14.2.asciidoc new file mode 100644 index 0000000000000..2bb374451b2ac --- /dev/null +++ b/docs/reference/release-notes/8.14.2.asciidoc @@ -0,0 +1,38 @@ +[[release-notes-8.14.2]] +== {es} version 8.14.2 + +coming[8.14.2] + +Also see <>. + +[[bug-8.14.2]] +[float] +=== Bug fixes + +Data streams:: +* Ensure a lazy rollover request will rollover the target data stream once. {es-pull}109636[#109636] +* [Data streams] Fix the description of the lazy rollover task {es-pull}109629[#109629] + +ES|QL:: +* Fix ESQL cancellation for exchange requests {es-pull}109695[#109695] +* Fix equals and hashcode for `SingleValueQuery.LuceneQuery` {es-pull}110035[#110035] +* Force execute inactive sink reaper {es-pull}109632[#109632] + +Infra/Scripting:: +* Check array size before returning array item in script doc values {es-pull}109824[#109824] (issue: {es-issue}104998[#104998]) + +Infra/Settings:: +* Guard file settings readiness on file settings support {es-pull}109500[#109500] + +Machine Learning:: +* Fix IndexOutOfBoundsException during inference {es-pull}109533[#109533] + +Mapping:: +* Re-define `index.mapper.dynamic` setting in 8.x for a better 7.x to 8.x upgrade if this setting is used. {es-pull}109341[#109341] + +Ranking:: +* Fix for from parameter when using `sub_searches` and rank {es-pull}106253[#106253] (issue: {es-issue}99011[#99011]) + +Search:: +* Add hexstring support byte painless scorers {es-pull}109492[#109492] +* Fix automatic tracking of collapse with `docvalue_fields` {es-pull}110103[#110103] \ No newline at end of file From e666865293df339619b2c7a482101e0925f7dcdd Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Thu, 4 Jul 2024 11:57:24 -0700 Subject: [PATCH 004/406] Reflect latest changes in synthetic source documentation (#109501) (#110489) --- docs/changelog/109501.yaml | 14 ++++ docs/reference/data-streams/tsds.asciidoc | 3 +- .../mapping/fields/source-field.asciidoc | 12 +-- .../mapping/fields/synthetic-source.asciidoc | 83 +++++++++++-------- 4 files changed, 70 insertions(+), 42 deletions(-) create mode 100644 docs/changelog/109501.yaml diff --git a/docs/changelog/109501.yaml b/docs/changelog/109501.yaml new file mode 100644 index 0000000000000..6e81f98816cbf --- /dev/null +++ b/docs/changelog/109501.yaml @@ -0,0 +1,14 @@ +pr: 109501 +summary: Reflect latest changes in synthetic source documentation +area: Mapping +type: enhancement +issues: [] +highlight: + title: Synthetic `_source` improvements + body: |- + There are multiple improvements to synthetic `_source` functionality: + + * Synthetic `_source` is now supported for all field types including `nested` and `object`. `object` fields are supported with `enabled` set to `false`. + + * Synthetic `_source` can be enabled together with `ignore_malformed` and `ignore_above` parameters for all field types that support them. + notable: false diff --git a/docs/reference/data-streams/tsds.asciidoc b/docs/reference/data-streams/tsds.asciidoc index 460048d8ccbc9..de89fa1ca3f31 100644 --- a/docs/reference/data-streams/tsds.asciidoc +++ b/docs/reference/data-streams/tsds.asciidoc @@ -53,8 +53,9 @@ shard segments by `_tsid` and `@timestamp`. documents, the document `_id` is a hash of the document's dimensions and `@timestamp`. 
A TSDS doesn't support custom document `_id` values. + * A TSDS uses <>, and as a result is -subject to a number of <>. +subject to some <> and <> applied to the `_source` field. NOTE: A time series index can contain fields other than dimensions or metrics. diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index ec824e421e015..903b301ab1a96 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -6,11 +6,11 @@ at index time. The `_source` field itself is not indexed (and thus is not searchable), but it is stored so that it can be returned when executing _fetch_ requests, like <> or <>. -If disk usage is important to you then have a look at -<> which shrinks disk usage at the cost of -only supporting a subset of mappings and slower fetches or (not recommended) -<> which also shrinks disk -usage but disables many features. +If disk usage is important to you, then consider the following options: + +- Using <>, which reconstructs source content at the time of retrieval instead of storing it on disk. This shrinks disk usage, at the cost of slower access to `_source` in <> and <> queries. +- <>. This shrinks disk +usage but disables features that rely on `_source`. include::synthetic-source.asciidoc[] @@ -43,7 +43,7 @@ available then a number of features are not supported: * The <>, <>, and <> APIs. -* In the {kib} link:{kibana-ref}/discover.html[Discover] application, field data will not be displayed. +* In the {kib} link:{kibana-ref}/discover.html[Discover] application, field data will not be displayed. * On the fly <>. diff --git a/docs/reference/mapping/fields/synthetic-source.asciidoc b/docs/reference/mapping/fields/synthetic-source.asciidoc index a0e7aed177a9c..ccea38cf602da 100644 --- a/docs/reference/mapping/fields/synthetic-source.asciidoc +++ b/docs/reference/mapping/fields/synthetic-source.asciidoc @@ -28,45 +28,22 @@ PUT idx While this on the fly reconstruction is *generally* slower than saving the source documents verbatim and loading them at query time, it saves a lot of storage -space. +space. Additional latency can be avoided by not loading `_source` field in queries when it is not needed. + +[[synthetic-source-fields]] +===== Supported fields +Synthetic `_source` is supported by all field types. Depending on implementation details, field types have different properties when used with synthetic `_source`. + +<> construct synthetic `_source` using existing data, most commonly <> and <>. For these field types, no additional space is needed to store the contents of `_source` field. Due to the storage layout of <>, the generated `_source` field undergoes <> compared to original document. + +For all other field types, the original value of the field is stored as is, in the same way as the `_source` field in non-synthetic mode. In this case there are no modifications and field data in `_source` is the same as in the original document. Similarly, malformed values of fields that use <> or <> need to be stored as is. This approach is less storage efficient since data needed for `_source` reconstruction is stored in addition to other data required to index the field (like `doc_values`). [[synthetic-source-restrictions]] ===== Synthetic `_source` restrictions -There are a couple of restrictions to be aware of: +Synthetic `_source` cannot be used together with field mappings that use <>. 
-* When you retrieve synthetic `_source` content it undergoes minor -<> compared to the original JSON. -* Synthetic `_source` can be used with indices that contain only these field -types: - -** <> -** {plugins}/mapper-annotated-text-usage.html#annotated-text-synthetic-source[`annotated-text`] -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> +Some field types have additional restrictions. These restrictions are documented in the **synthetic `_source`** section of the field type's <>. [[synthetic-source-modifications]] ===== Synthetic `_source` modifications @@ -178,4 +155,40 @@ that ordering. [[synthetic-source-modifications-ranges]] ====== Representation of ranges -Range field vales (e.g. `long_range`) are always represented as inclusive on both sides with bounds adjusted accordingly. See <>. +Range field values (e.g. `long_range`) are always represented as inclusive on both sides with bounds adjusted accordingly. See <>. + +[[synthetic-source-precision-loss-for-point-types]] +====== Reduced precision of `geo_point` values +Values of `geo_point` fields are represented in synthetic `_source` with reduced precision. See <>. + + +[[synthetic-source-fields-native-list]] +===== Field types that support synthetic source with no storage overhead +The following field types support synthetic source using data from <> or <>, and require no additional storage space to construct the `_source` field. + +NOTE: If you enable the <> or <> settings, then additional storage is required to store ignored field values for these types. + +** <> +** {plugins}/mapper-annotated-text-usage.html#annotated-text-synthetic-source[`annotated-text`] +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> From 04e533b4623dbf5ec07d50a35255e5a479771c87 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 4 Jul 2024 13:29:36 -0700 Subject: [PATCH 005/406] Fix esql enrich memory leak (#109275) (#110450) (#110492) This PR was reviewed in #109275 Block and Vector use a non-thread-safe RefCounted. Threads that increase or decrease the references must have a happen-before relationship. However, this order is not guaranteed in the enrich lookup for the reference of selectedPositions. The driver can complete the MergePositionsOperator, which decreases the reference count of selectedPositions, while the finally block may also decrease it in a separate thread. These actions occur without a defined happen-before relationship. 
Closes #108532 --- .../esql/enrich/EnrichLookupService.java | 164 ++++++++++-------- 1 file changed, 91 insertions(+), 73 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 87c558fe5bd1e..2425fa24b17c2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -31,7 +31,6 @@ import org.elasticsearch.compute.data.BlockStreamInput; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LocalCircuitBreaker; import org.elasticsearch.compute.data.OrdinalBytesRefBlock; @@ -43,6 +42,7 @@ import org.elasticsearch.compute.operator.OutputOperator; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.MappedFieldType; @@ -247,30 +247,53 @@ private void doLookup( ActionListener listener ) { Block inputBlock = inputPage.getBlock(0); - final IntBlock selectedPositions; - final OrdinalBytesRefBlock ordinalsBytesRefBlock; - if (inputBlock instanceof BytesRefBlock bytesRefBlock && (ordinalsBytesRefBlock = bytesRefBlock.asOrdinals()) != null) { - inputBlock = ordinalsBytesRefBlock.getDictionaryVector().asBlock(); - selectedPositions = ordinalsBytesRefBlock.getOrdinalsBlock(); - selectedPositions.mustIncRef(); - } else { - selectedPositions = IntVector.range(0, inputBlock.getPositionCount(), blockFactory).asBlock(); + if (inputBlock.areAllValuesNull()) { + listener.onResponse(createNullResponse(inputPage.getPositionCount(), extractFields)); + return; } - LocalCircuitBreaker localBreaker = null; + final List releasables = new ArrayList<>(6); + boolean started = false; try { - if (inputBlock.areAllValuesNull()) { - listener.onResponse(createNullResponse(inputPage.getPositionCount(), extractFields)); - return; - } - ShardSearchRequest shardSearchRequest = new ShardSearchRequest(shardId, 0, AliasFilter.EMPTY); - SearchContext searchContext = searchService.createSearchContext(shardSearchRequest, SearchService.NO_TIMEOUT); - listener = ActionListener.runBefore(listener, searchContext::close); - localBreaker = new LocalCircuitBreaker( + final ShardSearchRequest shardSearchRequest = new ShardSearchRequest(shardId, 0, AliasFilter.EMPTY); + final SearchContext searchContext = searchService.createSearchContext(shardSearchRequest, SearchService.NO_TIMEOUT); + releasables.add(searchContext); + final LocalCircuitBreaker localBreaker = new LocalCircuitBreaker( blockFactory.breaker(), localBreakerSettings.overReservedBytes(), localBreakerSettings.maxOverReservedBytes() ); - DriverContext driverContext = new DriverContext(bigArrays, blockFactory.newChildFactory(localBreaker)); + releasables.add(localBreaker); + final DriverContext driverContext = new DriverContext(bigArrays, blockFactory.newChildFactory(localBreaker)); + final ElementType[] mergingTypes = new ElementType[extractFields.size()]; + for (int i = 0; i < extractFields.size(); i++) { + mergingTypes[i] = 
PlannerUtils.toElementType(extractFields.get(i).dataType()); + } + final int[] mergingChannels = IntStream.range(0, extractFields.size()).map(i -> i + 2).toArray(); + final MergePositionsOperator mergePositionsOperator; + final OrdinalBytesRefBlock ordinalsBytesRefBlock; + if (inputBlock instanceof BytesRefBlock bytesRefBlock && (ordinalsBytesRefBlock = bytesRefBlock.asOrdinals()) != null) { + inputBlock = ordinalsBytesRefBlock.getDictionaryVector().asBlock(); + var selectedPositions = ordinalsBytesRefBlock.getOrdinalsBlock(); + mergePositionsOperator = new MergePositionsOperator( + 1, + mergingChannels, + mergingTypes, + selectedPositions, + driverContext.blockFactory() + ); + + } else { + try (var selectedPositions = IntVector.range(0, inputBlock.getPositionCount(), blockFactory).asBlock()) { + mergePositionsOperator = new MergePositionsOperator( + 1, + mergingChannels, + mergingTypes, + selectedPositions, + driverContext.blockFactory() + ); + } + } + releasables.add(mergePositionsOperator); SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext(); MappedFieldType fieldType = searchExecutionContext.getFieldType(matchField); var queryList = switch (matchType) { @@ -284,57 +307,13 @@ private void doLookup( queryList, searchExecutionContext.getIndexReader() ); - List intermediateOperators = new ArrayList<>(extractFields.size() + 2); - final ElementType[] mergingTypes = new ElementType[extractFields.size()]; - // load the fields - List fields = new ArrayList<>(extractFields.size()); - for (int i = 0; i < extractFields.size(); i++) { - NamedExpression extractField = extractFields.get(i); - final ElementType elementType = PlannerUtils.toElementType(extractField.dataType()); - mergingTypes[i] = elementType; - EsPhysicalOperationProviders.ShardContext ctx = new EsPhysicalOperationProviders.DefaultShardContext( - 0, - searchContext.getSearchExecutionContext(), - searchContext.request().getAliasFilter() - ); - BlockLoader loader = ctx.blockLoader( - extractField instanceof Alias a ? 
((NamedExpression) a.child()).name() : extractField.name(), - extractField.dataType() == DataType.UNSUPPORTED, - MappedFieldType.FieldExtractPreference.NONE - ); - fields.add( - new ValuesSourceReaderOperator.FieldInfo( - extractField.name(), - PlannerUtils.toElementType(extractField.dataType()), - shardIdx -> { - if (shardIdx != 0) { - throw new IllegalStateException("only one shard"); - } - return loader; - } - ) - ); - } - intermediateOperators.add( - new ValuesSourceReaderOperator( - driverContext.blockFactory(), - fields, - List.of( - new ValuesSourceReaderOperator.ShardContext( - searchContext.searcher().getIndexReader(), - searchContext::newSourceLoader - ) - ), - 0 - ) - ); - // merging field-values by position - final int[] mergingChannels = IntStream.range(0, extractFields.size()).map(i -> i + 2).toArray(); - intermediateOperators.add( - new MergePositionsOperator(1, mergingChannels, mergingTypes, selectedPositions, driverContext.blockFactory()) - ); + releasables.add(queryOperator); + var extractFieldsOperator = extractFieldsOperator(searchContext, driverContext, extractFields); + releasables.add(extractFieldsOperator); + AtomicReference result = new AtomicReference<>(); OutputOperator outputOperator = new OutputOperator(List.of(), Function.identity(), result::set); + releasables.add(outputOperator); Driver driver = new Driver( "enrich-lookup:" + sessionId, System.currentTimeMillis(), @@ -350,18 +329,16 @@ private void doLookup( inputPage.getPositionCount() ), queryOperator, - intermediateOperators, + List.of(extractFieldsOperator, mergePositionsOperator), outputOperator, Driver.DEFAULT_STATUS_INTERVAL, - localBreaker + Releasables.wrap(searchContext, localBreaker) ); task.addListener(() -> { String reason = Objects.requireNonNullElse(task.getReasonCancelled(), "task was cancelled"); driver.cancel(reason); }); - var threadContext = transportService.getThreadPool().getThreadContext(); - localBreaker = null; Driver.start(threadContext, executor, driver, Driver.DEFAULT_MAX_ITERATIONS, listener.map(ignored -> { Page out = result.get(); if (out == null) { @@ -369,11 +346,52 @@ private void doLookup( } return out; })); + started = true; } catch (Exception e) { listener.onFailure(e); } finally { - Releasables.close(selectedPositions, localBreaker); + if (started == false) { + Releasables.close(releasables); + } + } + } + + private static Operator extractFieldsOperator( + SearchContext searchContext, + DriverContext driverContext, + List extractFields + ) { + EsPhysicalOperationProviders.ShardContext shardContext = new EsPhysicalOperationProviders.DefaultShardContext( + 0, + searchContext.getSearchExecutionContext(), + searchContext.request().getAliasFilter() + ); + List fields = new ArrayList<>(extractFields.size()); + for (NamedExpression extractField : extractFields) { + BlockLoader loader = shardContext.blockLoader( + extractField instanceof Alias a ? 
((NamedExpression) a.child()).name() : extractField.name(), + extractField.dataType() == DataType.UNSUPPORTED, + MappedFieldType.FieldExtractPreference.NONE + ); + fields.add( + new ValuesSourceReaderOperator.FieldInfo( + extractField.name(), + PlannerUtils.toElementType(extractField.dataType()), + shardIdx -> { + if (shardIdx != 0) { + throw new IllegalStateException("only one shard"); + } + return loader; + } + ) + ); } + return new ValuesSourceReaderOperator( + driverContext.blockFactory(), + fields, + List.of(new ValuesSourceReaderOperator.ShardContext(searchContext.searcher().getIndexReader(), searchContext::newSourceLoader)), + 0 + ); } private Page createNullResponse(int positionCount, List extractFields) { From fe324d5b77d0e11ea460a0e605c904946541c816 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Fri, 5 Jul 2024 13:10:08 +0300 Subject: [PATCH 006/406] DOCS Query Roles (#110473) (#110499) These are the docs changes in relation to https://github.com/elastic/elasticsearch/pull/108733 --- docs/reference/rest-api/security.asciidoc | 2 + .../rest-api/security/get-roles.asciidoc | 5 +- .../rest-api/security/query-role.asciidoc | 283 ++++++++++++++++++ .../rest-api/security/query-user.asciidoc | 19 +- .../authorization/managing-roles.asciidoc | 14 +- 5 files changed, 311 insertions(+), 12 deletions(-) create mode 100644 docs/reference/rest-api/security/query-role.asciidoc diff --git a/docs/reference/rest-api/security.asciidoc b/docs/reference/rest-api/security.asciidoc index 04cd838c45600..82cf38e52bd80 100644 --- a/docs/reference/rest-api/security.asciidoc +++ b/docs/reference/rest-api/security.asciidoc @@ -50,6 +50,7 @@ Use the following APIs to add, remove, update, and retrieve roles in the native * <> * <> * <> +* <> [discrete] [[security-token-apis]] @@ -192,6 +193,7 @@ include::security/get-app-privileges.asciidoc[] include::security/get-builtin-privileges.asciidoc[] include::security/get-role-mappings.asciidoc[] include::security/get-roles.asciidoc[] +include::security/query-role.asciidoc[] include::security/get-service-accounts.asciidoc[] include::security/get-service-credentials.asciidoc[] include::security/get-settings.asciidoc[] diff --git a/docs/reference/rest-api/security/get-roles.asciidoc b/docs/reference/rest-api/security/get-roles.asciidoc index 3eb5a735194c6..3cc2f95c6ea7e 100644 --- a/docs/reference/rest-api/security/get-roles.asciidoc +++ b/docs/reference/rest-api/security/get-roles.asciidoc @@ -38,7 +38,10 @@ API cannot retrieve roles that are defined in roles files. ==== {api-response-body-title} A successful call returns an array of roles with the JSON representation of the -role. +role. The returned role format is a simple extension of the <> format, +only adding an extra field `transient_metadata.enabled`. +This field is `false` in case the role is automatically disabled, for example when the license +level does not allow some permissions that the role grants. [[security-api-get-role-response-codes]] ==== {api-response-codes-title} diff --git a/docs/reference/rest-api/security/query-role.asciidoc b/docs/reference/rest-api/security/query-role.asciidoc new file mode 100644 index 0000000000000..937bd263140fc --- /dev/null +++ b/docs/reference/rest-api/security/query-role.asciidoc @@ -0,0 +1,283 @@ +[role="xpack"] +[[security-api-query-role]] +=== Query Role API + +++++ +Query Role +++++ + +Retrieves roles with <> in a <> fashion. 
+ +[[security-api-query-role-request]] +==== {api-request-title} + +`GET /_security/_query/role` + +`POST /_security/_query/role` + +[[security-api-query-role-prereqs]] +==== {api-prereq-title} + +* To use this API, you must have at least the `read_security` cluster privilege. + +[[security-api-query-role-desc]] +==== {api-description-title} + +The role management APIs are generally the preferred way to manage roles, rather than using +<>. +The query roles API does not retrieve roles that are defined in roles files, nor <> ones. +You can optionally filter the results with a query. Also, the results can be paginated and sorted. + +[[security-api-query-role-request-body]] +==== {api-request-body-title} + +You can specify the following parameters in the request body: + +`query`:: +(Optional, string) A <> to filter which roles to return. +The query supports a subset of query types, including +<>, <>, +<>, <>, +<>, <>, +<>, <>, +<>, <>, +and <>. ++ +You can query the following values associated with a role. ++ +.Valid values for `query` +[%collapsible%open] +==== +`name`:: +(keyword) The <> of the role. + +`description`:: +(text) The <> of the role. + +`metadata`:: +(flattened) Metadata field associated with the <>, such as `metadata.app_tag`. +Note that metadata is internally indexed as a <> field type. +This means that all sub-fields act like `keyword` fields when querying and sorting. +It also implies that it is not possible to refer to a subset of metadata fields using wildcard patterns, +e.g. `metadata.field*`, even for query types that support field name patterns. +Lastly, all the metadata fields can be searched together when simply mentioning the +`metadata` field (i.e. not followed by any dot and sub-field name). + +`applications`:: +The list of <> that the role grants. + +`application`::: +(keyword) The name of the application associated to the privileges and resources. + +`privileges`::: +(keyword) The names of the privileges that the role grants. + +`resources`::: +(keyword) The resources to which the privileges apply. + +==== + +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=from] ++ +By default, you cannot page through more than 10,000 hits using the `from` and +`size` parameters. To page through more hits, use the +<> parameter. + +`size`:: +(Optional, integer) The number of hits to return. Must not be negative and defaults to `10`. ++ +By default, you cannot page through more than 10,000 hits using the `from` and +`size` parameters. To page through more hits, use the +<> parameter. + +`sort`:: +(Optional, object) <>. You can sort on `username`, `roles` or `enabled`. +In addition, sort can also be applied to the `_doc` field to sort by index order. + +`search_after`:: +(Optional, array) <> definition. + + +[[security-api-query-role-response-body]] +==== {api-response-body-title} + +This API returns the following top level fields: + +`total`:: +The total number of roles found. + +`count`:: +The number of roles returned in the response. + +`roles`:: +A list of roles that match the query. +The returned role format is an extension of the <> format. +It adds the `transient_metadata.enabled` and the `_sort` fields. +`transient_metadata.enabled` is set to `false` in case the role is automatically disabled, +for example when the role grants privileges that are not allowed by the installed license. +`_sort` is present when the search query sorts on some field. +It contains the array of values that have been used for sorting. 
+ +[[security-api-query-role-example]] +==== {api-examples-title} + +The following request lists all roles, sorted by the role name: + +[source,console] +---- +POST /_security/_query/role +{ + "sort": ["name"] +} +---- +// TEST[setup:admin_role,user_role] + +A successful call returns a JSON structure that contains the information +retrieved for one or more roles: + +[source,console-result] +---- +{ + "total": 2, + "count": 2, + "roles": [ <1> + { + "name" : "my_admin_role", + "cluster" : [ + "all" + ], + "indices" : [ + { + "names" : [ + "index1", + "index2" + ], + "privileges" : [ + "all" + ], + "field_security" : { + "grant" : [ + "title", + "body" + ] + }, + "allow_restricted_indices" : false + } + ], + "applications" : [ ], + "run_as" : [ + "other_user" + ], + "metadata" : { + "version" : 1 + }, + "transient_metadata" : { + "enabled" : true + }, + "description" : "Grants full access to all management features within the cluster.", + "_sort" : [ + "my_admin_role" + ] + }, + { + "name" : "my_user_role", + "cluster" : [ ], + "indices" : [ + { + "names" : [ + "index1", + "index2" + ], + "privileges" : [ + "all" + ], + "field_security" : { + "grant" : [ + "title", + "body" + ] + }, + "allow_restricted_indices" : false + } + ], + "applications" : [ ], + "run_as" : [ ], + "metadata" : { + "version" : 1 + }, + "transient_metadata" : { + "enabled" : true + }, + "description" : "Grants user access to some indicies.", + "_sort" : [ + "my_user_role" + ] + } + ] +} +---- +// TEST[continued] + +<1> The list of roles that were retrieved for this request + +Similarly, the following request can be used to query only the user access role, +given its description: + +[source,console] +---- +POST /_security/_query/role +{ + "query": { + "match": { + "description": { + "query": "user access" + } + } + }, + "size": 1 <1> +} +---- +// TEST[continued] + +<1> Return only the best matching role + +[source,console-result] +---- +{ + "total": 2, + "count": 1, + "roles": [ + { + "name" : "my_user_role", + "cluster" : [ ], + "indices" : [ + { + "names" : [ + "index1", + "index2" + ], + "privileges" : [ + "all" + ], + "field_security" : { + "grant" : [ + "title", + "body" + ] + }, + "allow_restricted_indices" : false + } + ], + "applications" : [ ], + "run_as" : [ ], + "metadata" : { + "version" : 1 + }, + "transient_metadata" : { + "enabled" : true + }, + "description" : "Grants user access to some indicies." + } + ] +} +---- diff --git a/docs/reference/rest-api/security/query-user.asciidoc b/docs/reference/rest-api/security/query-user.asciidoc index 952e0f40f2a3a..23852f0f2eed7 100644 --- a/docs/reference/rest-api/security/query-user.asciidoc +++ b/docs/reference/rest-api/security/query-user.asciidoc @@ -66,13 +66,6 @@ The email of the user. Specifies whether the user is enabled. ==== -[[security-api-query-user-query-params]] -==== {api-query-parms-title} - -`with_profile_uid`:: -(Optional, boolean) Determines whether to retrieve the <> `uid`, -if exists, for the users. Defaults to `false`. - include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=from] + By default, you cannot page through more than 10,000 hits using the `from` and @@ -93,6 +86,12 @@ In addition, sort can also be applied to the `_doc` field to sort by index order `search_after`:: (Optional, array) <> definition. +[[security-api-query-user-query-params]] +==== {api-query-parms-title} + +`with_profile_uid`:: +(Optional, boolean) Determines whether to retrieve the <> `uid`, +if exists, for the users. Defaults to `false`. 
[[security-api-query-user-response-body]] ==== {api-response-body-title} @@ -191,7 +190,7 @@ Use the user information retrieve the user with a query: [source,console] ---- -GET /_security/_query/user +POST /_security/_query/user { "query": { "prefix": { @@ -231,7 +230,7 @@ To retrieve the user `profile_uid` as part of the response: [source,console] -------------------------------------------------- -GET /_security/_query/user?with_profile_uid=true +POST /_security/_query/user?with_profile_uid=true { "query": { "prefix": { @@ -272,7 +271,7 @@ Use a `bool` query to issue complex logical conditions and use [source,js] ---- -GET /_security/_query/user +POST /_security/_query/user { "query": { "bool": { diff --git a/docs/reference/security/authorization/managing-roles.asciidoc b/docs/reference/security/authorization/managing-roles.asciidoc index 253aa33822234..535d70cbc5e9c 100644 --- a/docs/reference/security/authorization/managing-roles.asciidoc +++ b/docs/reference/security/authorization/managing-roles.asciidoc @@ -13,7 +13,9 @@ A role is defined by the following JSON structure: "indices": [ ... ], <4> "applications": [ ... ], <5> "remote_indices": [ ... ], <6> - "remote_cluster": [ ... ] <7> + "remote_cluster": [ ... ], <7> + "metadata": { ... }, <8> + "description": "..." <9> } ----- // NOTCONSOLE @@ -40,6 +42,16 @@ A role is defined by the following JSON structure: <>. This field is optional (missing `remote_cluster` privileges effectively means no additional cluster permissions for any API key based remote clusters). +<8> Metadata field associated with the role, such as `metadata.app_tag`. + Metadata is internally indexed as a <> field type. + This means that all sub-fields act like `keyword` fields when querying and sorting. + Metadata values can be simple values, but also lists and maps. + This field is optional. +<9> A string value with the description text of the role. + The maximum length of it is `1000` chars. + The field is internally indexed as a <> field type + (with default values for all parameters). + This field is optional. [[valid-role-name]] NOTE: Role names must be at least 1 and no more than 507 characters. They can From f6a888c265659f44e312683ac59f8cf639bfa7a6 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 5 Jul 2024 12:32:33 +0200 Subject: [PATCH 007/406] Slightly adjust wording around potential savings mentioned in the description of the index.codec setting (#110468) (#110515) --- docs/reference/index-modules.asciidoc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 04bebfae2763b..24149afe802a2 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -81,8 +81,9 @@ breaking change]. If you are updating the compression type, the new one will be applied after segments are merged. Segment merging can be forced using <>. Experiments with indexing log datasets - have shown that `best_compression` gives up to ~18% lower storage usage - compared to `default` while only minimally affecting indexing throughput (~2%). + have shown that `best_compression` gives up to ~18% lower storage usage in + the most ideal scenario compared to `default` while only minimally affecting + indexing throughput (~2%). 
[[index-mode-setting]] `index.mode`:: + From c7d322443791cb245d93a63b7dd95c7f3f03bc01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Fri, 5 Jul 2024 13:52:20 +0200 Subject: [PATCH 008/406] Always write empty role descriptor fields to index (#110424) (#110498) * Always write empty role descriptor fields to index --- muted-tests.yml | 6 - .../action/role/QueryRoleResponse.java | 2 +- .../core/security/authz/RoleDescriptor.java | 17 +-- .../security/role/BulkPutRoleRestIT.java | 110 +++++++++++++++++- .../authz/store/NativeRolesStore.java | 40 +++++-- .../authz/store/NativeRolesStoreTests.java | 60 +++++++++- 6 files changed, 203 insertions(+), 32 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index d8eba8ad2dba6..91f38f3a5ba46 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -109,12 +109,6 @@ tests: - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" -- class: "org.elasticsearch.xpack.security.role.RoleWithDescriptionRestIT" - issue: "https://github.com/elastic/elasticsearch/issues/110416" - method: "testCreateOrUpdateRoleWithDescription" -- class: "org.elasticsearch.xpack.security.role.RoleWithDescriptionRestIT" - issue: "https://github.com/elastic/elasticsearch/issues/110417" - method: "testCreateOrUpdateRoleWithDescription" - class: org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT method: test {p0=search.vectors/41_knn_search_half_byte_quantized/Test create, merge, and search cosine} issue: https://github.com/elastic/elasticsearch/issues/109978 diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleResponse.java index 6bdc6c66c1835..8e9da10e449ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleResponse.java @@ -86,7 +86,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws // other details of the role descriptor (in the same object). 
assert Strings.isNullOrEmpty(roleDescriptor.getName()) == false; builder.field("name", roleDescriptor.getName()); - roleDescriptor.innerToXContent(builder, params, false, false); + roleDescriptor.innerToXContent(builder, params, false); if (sortValues != null && sortValues.length > 0) { builder.array("_sort", sortValues); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index 7bedab61bd43d..1a8839fa0fa4a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -417,13 +417,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean docCreation) throws IOException { - return toXContent(builder, params, docCreation, false); - } - - public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean docCreation, boolean includeMetadataFlattened) - throws IOException { builder.startObject(); - innerToXContent(builder, params, docCreation, includeMetadataFlattened); + innerToXContent(builder, params, docCreation); return builder.endObject(); } @@ -435,12 +430,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, boolea * @param docCreation {@code true} if the x-content is being generated for creating a document * in the security index, {@code false} if the x-content being generated * is for API display purposes - * @param includeMetadataFlattened {@code true} if the metadataFlattened field should be included in doc * @return x-content builder * @throws IOException if there was an error writing the x-content to the builder */ - public XContentBuilder innerToXContent(XContentBuilder builder, Params params, boolean docCreation, boolean includeMetadataFlattened) - throws IOException { + public XContentBuilder innerToXContent(XContentBuilder builder, Params params, boolean docCreation) throws IOException { builder.array(Fields.CLUSTER.getPreferredName(), clusterPrivileges); if (configurableClusterPrivileges.length != 0) { builder.field(Fields.GLOBAL.getPreferredName()); @@ -452,9 +445,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params, b builder.array(Fields.RUN_AS.getPreferredName(), runAs); } builder.field(Fields.METADATA.getPreferredName(), metadata); - if (includeMetadataFlattened) { - builder.field(Fields.METADATA_FLATTENED.getPreferredName(), metadata); - } + if (docCreation) { builder.field(Fields.TYPE.getPreferredName(), ROLE_TYPE); } else { @@ -1196,7 +1187,7 @@ private static ApplicationResourcePrivileges parseApplicationPrivilege(String ro public static final class RemoteIndicesPrivileges implements Writeable, ToXContentObject { - private static final RemoteIndicesPrivileges[] NONE = new RemoteIndicesPrivileges[0]; + public static final RemoteIndicesPrivileges[] NONE = new RemoteIndicesPrivileges[0]; private final IndicesPrivileges indicesPrivileges; private final String[] remoteClusters; diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/BulkPutRoleRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/BulkPutRoleRestIT.java index 0297abad7a508..88b952f33394e 100644 --- 
a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/BulkPutRoleRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/BulkPutRoleRestIT.java @@ -181,15 +181,74 @@ public void testPutNoValidRoles() throws Exception { public void testBulkUpdates() throws Exception { String request = """ {"roles": {"test1": {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}, "test2": - {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["read"]}]}, "test3": - {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["write"]}]}}}"""; - + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["read"]}], "description": "something"}, "test3": + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["write"]}], "remote_indices":[{"names":["logs-*"], + "privileges":["read"],"clusters":["my_cluster*","other_cluster"]}]}}}"""; { Map responseMap = upsertRoles(request); assertThat(responseMap, not(hasKey("errors"))); List> items = (List>) responseMap.get("created"); assertEquals(3, items.size()); + + fetchRoleAndAssertEqualsExpected( + "test1", + new RoleDescriptor( + "test1", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); + fetchRoleAndAssertEqualsExpected( + "test2", + new RoleDescriptor( + "test2", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("read").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + "something" + ) + ); + fetchRoleAndAssertEqualsExpected( + "test3", + new RoleDescriptor( + "test3", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("write").build() }, + null, + null, + null, + null, + null, + new RoleDescriptor.RemoteIndicesPrivileges[] { + RoleDescriptor.RemoteIndicesPrivileges.builder("my_cluster*", "other_cluster") + .indices("logs-*") + .privileges("read") + .build() }, + null, + null, + null + ) + ); } { Map responseMap = upsertRoles(request); @@ -200,7 +259,7 @@ public void testBulkUpdates() throws Exception { } { request = """ - {"roles": {"test1": {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["read"]}]}, "test2": + {"roles": {"test1": {}, "test2": {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}, "test3": {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}}}"""; @@ -208,6 +267,49 @@ public void testBulkUpdates() throws Exception { assertThat(responseMap, not(hasKey("errors"))); List> items = (List>) responseMap.get("updated"); assertEquals(3, items.size()); + + assertThat(responseMap, not(hasKey("errors"))); + + fetchRoleAndAssertEqualsExpected( + "test1", + new RoleDescriptor("test1", null, null, null, null, null, null, null, null, null, null, null) + ); + fetchRoleAndAssertEqualsExpected( + "test2", + new RoleDescriptor( + "test2", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); + fetchRoleAndAssertEqualsExpected( + "test3", + new RoleDescriptor( + "test3", + new String[] { "all" }, + new 
RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index adeada6cbf6cf..a2d2b21b489ea 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -59,6 +59,7 @@ import org.elasticsearch.xpack.core.security.action.role.RoleDescriptorRequestValidator; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; import org.elasticsearch.xpack.core.security.authz.support.DLSRoleQueryValidator; import org.elasticsearch.xpack.core.security.support.NativeRealmValidationUtil; @@ -607,16 +608,41 @@ private DeleteRequest createRoleDeleteRequest(final String roleName) { return client.prepareDelete(SECURITY_MAIN_ALIAS, getIdForRole(roleName)).request(); } - private XContentBuilder createRoleXContentBuilder(RoleDescriptor role) throws IOException { + // Package private for testing + XContentBuilder createRoleXContentBuilder(RoleDescriptor role) throws IOException { assert NativeRealmValidationUtil.validateRoleName(role.getName(), false) == null : "Role name was invalid or reserved: " + role.getName(); assert false == role.hasRestriction() : "restriction is not supported for native roles"; - return role.toXContent( - jsonBuilder(), - ToXContent.EMPTY_PARAMS, - true, - featureService.clusterHasFeature(clusterService.state(), SECURITY_ROLES_METADATA_FLATTENED) - ); + + XContentBuilder builder = jsonBuilder().startObject(); + role.innerToXContent(builder, ToXContent.EMPTY_PARAMS, true); + + if (featureService.clusterHasFeature(clusterService.state(), SECURITY_ROLES_METADATA_FLATTENED)) { + builder.field(RoleDescriptor.Fields.METADATA_FLATTENED.getPreferredName(), role.getMetadata()); + } + + // When role descriptor XContent is generated for the security index all empty fields need to have default values to make sure + // existing values are overwritten if not present since the request to update could be an UpdateRequest + // (update provided fields in existing document or create document) or IndexRequest (replace and reindex document) + if (role.hasConfigurableClusterPrivileges() == false) { + builder.startObject(RoleDescriptor.Fields.GLOBAL.getPreferredName()).endObject(); + } + + if (role.hasRemoteIndicesPrivileges() == false) { + builder.field(RoleDescriptor.Fields.REMOTE_INDICES.getPreferredName(), RoleDescriptor.RemoteIndicesPrivileges.NONE); + } + + if (role.hasRemoteClusterPermissions() == false + && clusterService.state().getMinTransportVersion().onOrAfter(ROLE_REMOTE_CLUSTER_PRIVS)) { + builder.array(RoleDescriptor.Fields.REMOTE_CLUSTER.getPreferredName(), RemoteClusterPermissions.NONE); + } + if (role.hasDescription() == false + && clusterService.state().getMinTransportVersion().onOrAfter(TransportVersions.SECURITY_ROLE_DESCRIPTION)) { + 
builder.field(RoleDescriptor.Fields.DESCRIPTION.getPreferredName(), ""); + } + + builder.endObject(); + return builder; } public void usageStats(ActionListener> listener) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java index a4ee449438fe0..bfa358d0b7d6e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; @@ -78,6 +79,7 @@ import org.mockito.Mockito; import java.io.IOException; +import java.lang.reflect.Field; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Path; @@ -138,7 +140,7 @@ private NativeRolesStore createRoleStoreForTest() { private NativeRolesStore createRoleStoreForTest(Settings settings) { new ReservedRolesStore(Set.of("superuser")); - final ClusterService clusterService = mock(ClusterService.class); + final ClusterService clusterService = mockClusterServiceWithMinNodeVersion(TransportVersion.current()); final SecuritySystemIndices systemIndices = new SecuritySystemIndices(settings); final FeatureService featureService = mock(FeatureService.class); systemIndices.init(client, featureService, clusterService); @@ -807,6 +809,62 @@ public void testBulkDeleteReservedRole() { verify(client, times(0)).bulk(any(BulkRequest.class), any()); } + /** + * Make sure all top level fields for a RoleDescriptor have default values to make sure they can be set to empty in an upsert + * call to the roles API + */ + public void testAllTopFieldsHaveEmptyDefaultsForUpsert() throws IOException, IllegalAccessException { + final NativeRolesStore rolesStore = createRoleStoreForTest(); + RoleDescriptor allNullDescriptor = new RoleDescriptor( + "all-null-descriptor", + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Set fieldsWithoutDefaultValue = Set.of( + RoleDescriptor.Fields.INDEX, + RoleDescriptor.Fields.NAMES, + RoleDescriptor.Fields.ALLOW_RESTRICTED_INDICES, + RoleDescriptor.Fields.RESOURCES, + RoleDescriptor.Fields.QUERY, + RoleDescriptor.Fields.PRIVILEGES, + RoleDescriptor.Fields.CLUSTERS, + RoleDescriptor.Fields.APPLICATION, + RoleDescriptor.Fields.FIELD_PERMISSIONS, + RoleDescriptor.Fields.FIELD_PERMISSIONS_2X, + RoleDescriptor.Fields.GRANT_FIELDS, + RoleDescriptor.Fields.EXCEPT_FIELDS, + RoleDescriptor.Fields.METADATA_FLATTENED, + RoleDescriptor.Fields.TRANSIENT_METADATA, + RoleDescriptor.Fields.RESTRICTION, + RoleDescriptor.Fields.WORKFLOWS + ); + + String serializedOutput = Strings.toString(rolesStore.createRoleXContentBuilder(allNullDescriptor)); + Field[] fields = RoleDescriptor.Fields.class.getFields(); + + for (Field field : fields) { + ParseField fieldValue = (ParseField) field.get(null); + if (fieldsWithoutDefaultValue.contains(fieldValue) == false) { + assertThat( + "New RoleDescriptor field without a default value detected. 
" + + "Set a value or add to excluded list if not expected to be set to empty through role APIs", + serializedOutput, + containsString(fieldValue.getPreferredName()) + ); + } + } + } + private ClusterService mockClusterServiceWithMinNodeVersion(TransportVersion transportVersion) { final ClusterService clusterService = mock(ClusterService.class, Mockito.RETURNS_DEEP_STUBS); when(clusterService.state().getMinTransportVersion()).thenReturn(transportVersion); From 6e628de31aa28d950b33d8e3707d30cc0ce3f567 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Fri, 5 Jul 2024 15:18:22 +0200 Subject: [PATCH 009/406] Add audit logging for bulk role APIs (#110410) (#110519) * Add audit logging for bulk put role --- .../role/BulkPutRoleRequestBuilder.java | 2 +- .../action/role/BulkPutRolesRequest.java | 4 +- .../audit/logfile/LoggingAuditTrail.java | 47 +++++++--- .../audit/logfile/LoggingAuditTrailTests.java | 88 ++++++++++++++----- 4 files changed, 106 insertions(+), 35 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilder.java index ba199e183d4af..cda45a67e81c6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilder.java @@ -44,7 +44,7 @@ public class BulkPutRoleRequestBuilder extends ActionRequestBuilder roles; - public BulkPutRolesRequest() {} + public BulkPutRolesRequest(List roles) { + this.roles = roles; + } public void setRoles(List roles) { this.roles = roles; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index 01104806c4a1c..bc5cc4a5e6b3f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -44,6 +44,7 @@ import org.elasticsearch.xcontent.json.JsonStringEncoder; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.Grant; import org.elasticsearch.xpack.core.security.action.apikey.AbstractCreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.BaseSingleUpdateApiKeyRequest; @@ -72,6 +73,8 @@ import org.elasticsearch.xpack.core.security.action.profile.SetProfileEnabledRequest; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; @@ -291,6 +294,8 @@ public class LoggingAuditTrail implements AuditTrail, 
ClusterStateListener { PutUserAction.NAME, PutRoleAction.NAME, PutRoleMappingAction.NAME, + ActionTypes.BULK_PUT_ROLES.name(), + ActionTypes.BULK_DELETE_ROLES.name(), TransportSetEnabledAction.TYPE.name(), TransportChangePasswordAction.TYPE.name(), CreateApiKeyAction.NAME, @@ -731,6 +736,11 @@ public void accessGranted( } else if (msg instanceof PutRoleRequest) { assert PutRoleAction.NAME.equals(action); securityChangeLogEntryBuilder(requestId).withRequestBody((PutRoleRequest) msg).build(); + } else if (msg instanceof BulkPutRolesRequest bulkPutRolesRequest) { + assert ActionTypes.BULK_PUT_ROLES.name().equals(action); + for (RoleDescriptor roleDescriptor : bulkPutRolesRequest.getRoles()) { + securityChangeLogEntryBuilder(requestId).withRequestBody(roleDescriptor.getName(), roleDescriptor).build(); + } } else if (msg instanceof PutRoleMappingRequest) { assert PutRoleMappingAction.NAME.equals(action); securityChangeLogEntryBuilder(requestId).withRequestBody((PutRoleMappingRequest) msg).build(); @@ -755,6 +765,11 @@ public void accessGranted( } else if (msg instanceof DeleteRoleRequest) { assert DeleteRoleAction.NAME.equals(action); securityChangeLogEntryBuilder(requestId).withRequestBody((DeleteRoleRequest) msg).build(); + } else if (msg instanceof BulkDeleteRolesRequest bulkDeleteRolesRequest) { + assert ActionTypes.BULK_DELETE_ROLES.name().equals(action); + for (String roleName : bulkDeleteRolesRequest.getRoleNames()) { + securityChangeLogEntryBuilder(requestId).withDeleteRole(roleName).build(); + } } else if (msg instanceof DeleteRoleMappingRequest) { assert DeleteRoleMappingAction.NAME.equals(action); securityChangeLogEntryBuilder(requestId).withRequestBody((DeleteRoleMappingRequest) msg).build(); @@ -1160,15 +1175,19 @@ LogEntryBuilder withRequestBody(ChangePasswordRequest changePasswordRequest) thr } LogEntryBuilder withRequestBody(PutRoleRequest putRoleRequest) throws IOException { + return withRequestBody(putRoleRequest.name(), putRoleRequest.roleDescriptor()); + } + + LogEntryBuilder withRequestBody(String roleName, RoleDescriptor roleDescriptor) throws IOException { logEntry.with(EVENT_ACTION_FIELD_NAME, "put_role"); XContentBuilder builder = JsonXContent.contentBuilder().humanReadable(true); builder.startObject() .startObject("role") - .field("name", putRoleRequest.name()) + .field("name", roleName) // the "role_descriptor" nested structure, where the "name" is left out, is closer to the event structure // for creating API Keys .field("role_descriptor"); - withRoleDescriptor(builder, putRoleRequest.roleDescriptor()); + withRoleDescriptor(builder, roleDescriptor); builder.endObject() // role .endObject(); logEntry.with(PUT_CONFIG_FIELD_NAME, Strings.toString(builder)); @@ -1350,7 +1369,7 @@ private static void withRoleDescriptor(XContentBuilder builder, RoleDescriptor r withIndicesPrivileges(builder, indicesPrivileges); } builder.endArray(); - // the toXContent method of the {@code RoleDescriptor.ApplicationResourcePrivileges) does a good job + // the toXContent method of the {@code RoleDescriptor.ApplicationResourcePrivileges} does a good job builder.xContentList(RoleDescriptor.Fields.APPLICATIONS.getPreferredName(), roleDescriptor.getApplicationPrivileges()); builder.array(RoleDescriptor.Fields.RUN_AS.getPreferredName(), roleDescriptor.getRunAs()); if (roleDescriptor.getMetadata() != null && false == roleDescriptor.getMetadata().isEmpty()) { @@ -1401,15 +1420,7 @@ LogEntryBuilder withRequestBody(DeleteUserRequest deleteUserRequest) throws IOEx } LogEntryBuilder 
withRequestBody(DeleteRoleRequest deleteRoleRequest) throws IOException { - logEntry.with(EVENT_ACTION_FIELD_NAME, "delete_role"); - XContentBuilder builder = JsonXContent.contentBuilder().humanReadable(true); - builder.startObject() - .startObject("role") - .field("name", deleteRoleRequest.name()) - .endObject() // role - .endObject(); - logEntry.with(DELETE_CONFIG_FIELD_NAME, Strings.toString(builder)); - return this; + return withDeleteRole(deleteRoleRequest.name()); } LogEntryBuilder withRequestBody(DeleteRoleMappingRequest deleteRoleMappingRequest) throws IOException { @@ -1532,6 +1543,18 @@ LogEntryBuilder withRequestBody(SetProfileEnabledRequest setProfileEnabledReques return this; } + LogEntryBuilder withDeleteRole(String roleName) throws IOException { + logEntry.with(EVENT_ACTION_FIELD_NAME, "delete_role"); + XContentBuilder builder = JsonXContent.contentBuilder().humanReadable(true); + builder.startObject() + .startObject("role") + .field("name", roleName) + .endObject() // role + .endObject(); + logEntry.with(DELETE_CONFIG_FIELD_NAME, Strings.toString(builder)); + return this; + } + static void withGrant(XContentBuilder builder, Grant grant) throws IOException { builder.startObject("grant").field("type", grant.getType()); if (grant.getUsername() != null) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index a3292a6ab5f4e..17bad90415e7c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; @@ -73,6 +74,8 @@ import org.elasticsearch.xpack.core.security.action.profile.SetProfileEnabledRequest; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; @@ -772,20 +775,19 @@ public void testSecurityConfigChangeEventFormattingForRoles() throws IOException auditTrail.accessGranted(requestId, authentication, PutRoleAction.NAME, putRoleRequest, authorizationInfo); output = CapturingLogger.output(logger.getName(), Level.INFO); assertThat(output.size(), is(2)); - String generatedPutRoleAuditEventString = output.get(1); - String expectedPutRoleAuditEventString = Strings.format(""" - "put":{"role":{"name":"%s","role_descriptor":%s}}\ - """, putRoleRequest.name(), auditedRolesMap.get(putRoleRequest.name())); - assertThat(generatedPutRoleAuditEventString, 
containsString(expectedPutRoleAuditEventString)); - generatedPutRoleAuditEventString = generatedPutRoleAuditEventString.replace(", " + expectedPutRoleAuditEventString, ""); - checkedFields = new HashMap<>(commonFields); - checkedFields.remove(LoggingAuditTrail.ORIGIN_ADDRESS_FIELD_NAME); - checkedFields.remove(LoggingAuditTrail.ORIGIN_TYPE_FIELD_NAME); - checkedFields.put("type", "audit"); - checkedFields.put(LoggingAuditTrail.EVENT_TYPE_FIELD_NAME, "security_config_change"); - checkedFields.put(LoggingAuditTrail.EVENT_ACTION_FIELD_NAME, "put_role"); - checkedFields.put(LoggingAuditTrail.REQUEST_ID_FIELD_NAME, requestId); - assertMsg(generatedPutRoleAuditEventString, checkedFields); + assertPutRoleAuditLogLine(putRoleRequest.name(), output.get(1), auditedRolesMap, requestId); + // clear log + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + + BulkPutRolesRequest bulkPutRolesRequest = new BulkPutRolesRequest(allTestRoleDescriptors); + bulkPutRolesRequest.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())); + auditTrail.accessGranted(requestId, authentication, ActionTypes.BULK_PUT_ROLES.name(), bulkPutRolesRequest, authorizationInfo); + output = CapturingLogger.output(logger.getName(), Level.INFO); + assertThat(output.size(), is(allTestRoleDescriptors.size() + 1)); + + for (int i = 0; i < allTestRoleDescriptors.size(); i++) { + assertPutRoleAuditLogLine(allTestRoleDescriptors.get(i).getName(), output.get(i + 1), auditedRolesMap, requestId); + } // clear log CapturingLogger.output(logger.getName(), Level.INFO).clear(); @@ -795,25 +797,64 @@ public void testSecurityConfigChangeEventFormattingForRoles() throws IOException auditTrail.accessGranted(requestId, authentication, DeleteRoleAction.NAME, deleteRoleRequest, authorizationInfo); output = CapturingLogger.output(logger.getName(), Level.INFO); assertThat(output.size(), is(2)); - String generatedDeleteRoleAuditEventString = output.get(1); + assertDeleteRoleAuditLogLine(putRoleRequest.name(), output.get(1), requestId); + // clear log + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + + BulkDeleteRolesRequest bulkDeleteRolesRequest = new BulkDeleteRolesRequest( + allTestRoleDescriptors.stream().map(RoleDescriptor::getName).toList() + ); + bulkDeleteRolesRequest.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())); + auditTrail.accessGranted( + requestId, + authentication, + ActionTypes.BULK_DELETE_ROLES.name(), + bulkDeleteRolesRequest, + authorizationInfo + ); + output = CapturingLogger.output(logger.getName(), Level.INFO); + assertThat(output.size(), is(allTestRoleDescriptors.size() + 1)); + for (int i = 0; i < allTestRoleDescriptors.size(); i++) { + assertDeleteRoleAuditLogLine(allTestRoleDescriptors.get(i).getName(), output.get(i + 1), requestId); + } + } + + private void assertPutRoleAuditLogLine(String roleName, String logLine, Map expectedLogByRoleName, String requestId) { + String expectedPutRoleAuditEventString = Strings.format(""" + "put":{"role":{"name":"%s","role_descriptor":%s}}\ + """, roleName, expectedLogByRoleName.get(roleName)); + + assertThat(logLine, containsString(expectedPutRoleAuditEventString)); + String reducedLogLine = logLine.replace(", " + expectedPutRoleAuditEventString, ""); + Map checkedFields = new HashMap<>(commonFields); + checkedFields.remove(LoggingAuditTrail.ORIGIN_ADDRESS_FIELD_NAME); + checkedFields.remove(LoggingAuditTrail.ORIGIN_TYPE_FIELD_NAME); + checkedFields.put("type", "audit"); + checkedFields.put(LoggingAuditTrail.EVENT_TYPE_FIELD_NAME, 
"security_config_change"); + checkedFields.put(LoggingAuditTrail.EVENT_ACTION_FIELD_NAME, "put_role"); + checkedFields.put(LoggingAuditTrail.REQUEST_ID_FIELD_NAME, requestId); + assertMsg(reducedLogLine, checkedFields); + } + + private void assertDeleteRoleAuditLogLine(String roleName, String logLine, String requestId) { StringBuilder deleteRoleStringBuilder = new StringBuilder().append("\"delete\":{\"role\":{\"name\":"); - if (deleteRoleRequest.name() == null) { + if (roleName == null) { deleteRoleStringBuilder.append("null"); } else { - deleteRoleStringBuilder.append("\"").append(deleteRoleRequest.name()).append("\""); + deleteRoleStringBuilder.append("\"").append(roleName).append("\""); } deleteRoleStringBuilder.append("}}"); String expectedDeleteRoleAuditEventString = deleteRoleStringBuilder.toString(); - assertThat(generatedDeleteRoleAuditEventString, containsString(expectedDeleteRoleAuditEventString)); - generatedDeleteRoleAuditEventString = generatedDeleteRoleAuditEventString.replace(", " + expectedDeleteRoleAuditEventString, ""); - checkedFields = new HashMap<>(commonFields); + assertThat(logLine, containsString(expectedDeleteRoleAuditEventString)); + String reducedLogLine = logLine.replace(", " + expectedDeleteRoleAuditEventString, ""); + Map checkedFields = new HashMap<>(commonFields); checkedFields.remove(LoggingAuditTrail.ORIGIN_ADDRESS_FIELD_NAME); checkedFields.remove(LoggingAuditTrail.ORIGIN_TYPE_FIELD_NAME); checkedFields.put("type", "audit"); checkedFields.put(LoggingAuditTrail.EVENT_TYPE_FIELD_NAME, "security_config_change"); checkedFields.put(LoggingAuditTrail.EVENT_ACTION_FIELD_NAME, "delete_role"); checkedFields.put(LoggingAuditTrail.REQUEST_ID_FIELD_NAME, requestId); - assertMsg(generatedDeleteRoleAuditEventString, checkedFields); + assertMsg(reducedLogLine, checkedFields); } public void testSecurityConfigChangeEventForCrossClusterApiKeys() throws IOException { @@ -1975,6 +2016,11 @@ public void testSecurityConfigChangedEventSelection() { Tuple actionAndRequest = randomFrom( new Tuple<>(PutUserAction.NAME, new PutUserRequest()), new Tuple<>(PutRoleAction.NAME, new PutRoleRequest()), + new Tuple<>( + ActionTypes.BULK_PUT_ROLES.name(), + new BulkPutRolesRequest(List.of(new RoleDescriptor(randomAlphaOfLength(20), null, null, null))) + ), + new Tuple<>(ActionTypes.BULK_DELETE_ROLES.name(), new BulkDeleteRolesRequest(List.of(randomAlphaOfLength(20)))), new Tuple<>(PutRoleMappingAction.NAME, new PutRoleMappingRequest()), new Tuple<>(TransportSetEnabledAction.TYPE.name(), new SetEnabledRequest()), new Tuple<>(TransportChangePasswordAction.TYPE.name(), new ChangePasswordRequest()), From 077b94ec6b2c43aae30f36922d422778ee5e550e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 5 Jul 2024 16:02:56 +0200 Subject: [PATCH 010/406] [8.15][DOCS] Adds references of 8.14.2 release notes file to release notes index. (#110522) --- docs/reference/release-notes.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 2e043834c9969..f8e2b74edd4e5 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -7,6 +7,7 @@ This section summarizes the changes in each release. * <> +* <> * <> * <> * <> @@ -69,6 +70,7 @@ This section summarizes the changes in each release. 
-- include::release-notes/8.15.0.asciidoc[] +include::release-notes/8.14.2.asciidoc[] include::release-notes/8.14.1.asciidoc[] include::release-notes/8.14.0.asciidoc[] include::release-notes/8.13.4.asciidoc[] From 3f2c81efe95815c66e9e7257be0518e805fd1abe Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Fri, 5 Jul 2024 11:13:13 -0400 Subject: [PATCH 011/406] Fix bit vector tests (#110521) (#110525) Bit vector tests were failing in cases where an index has more than 1 shard. For error cases when we expected a failure of the whole request, shards with empty documents returned success and the whole request unexpectedly returned 200. Ensuring that the index contains only 1 shard fixes these failures. Closes #110290, #110291 --- .../test/painless/146_dense_vector_bit_basic.yml | 6 ++---- muted-tests.yml | 6 ------ 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml index 3eb686bda2174..4c195a0e32623 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml @@ -8,6 +8,8 @@ setup: indices.create: index: test-index body: + settings: + number_of_shards: 1 mappings: properties: vector: @@ -107,7 +109,6 @@ setup: headers: Content-Type: application/json search: - rest_total_hits_as_int: true body: query: script_score: @@ -138,7 +139,6 @@ setup: headers: Content-Type: application/json search: - rest_total_hits_as_int: true body: query: script_score: @@ -152,7 +152,6 @@ setup: headers: Content-Type: application/json search: - rest_total_hits_as_int: true body: query: script_score: @@ -167,7 +166,6 @@ setup: headers: Content-Type: application/json search: - rest_total_hits_as_int: true body: query: script_score: diff --git a/muted-tests.yml b/muted-tests.yml index 91f38f3a5ba46..71e7d050c0e19 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -76,12 +76,6 @@ tests: - class: org.elasticsearch.compute.lucene.ValueSourceReaderTypeConversionTests method: testLoadAll issue: https://github.com/elastic/elasticsearch/issues/110244 -- class: org.elasticsearch.painless.LangPainlessClientYamlTestSuiteIT - method: test {yaml=painless/146_dense_vector_bit_basic/Cosine Similarity is not supported} - issue: https://github.com/elastic/elasticsearch/issues/110290 -- class: org.elasticsearch.painless.LangPainlessClientYamlTestSuiteIT - method: test {yaml=painless/146_dense_vector_bit_basic/Dot Product is not supported} - issue: https://github.com/elastic/elasticsearch/issues/110291 - class: org.elasticsearch.action.search.SearchProgressActionListenerIT method: testSearchProgressWithQuery issue: https://github.com/elastic/elasticsearch/issues/109867 From 779a5f08abfba0bd2b7b49ab3a2adbe35e63dad1 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 5 Jul 2024 09:14:36 -0700 Subject: [PATCH 012/406] Fix node tests for ToPartial (#110448) (#110531) This change makes the three-parameter constructor of ToPartial public so that EsqlNodeSubclassTests can pick it up properly.
Closes #110310 --- muted-tests.yml | 6 ------ .../esql/expression/function/aggregate/ToPartial.java | 7 +------ .../esql/optimizer/rules/TranslateMetricsAggregate.java | 2 +- 3 files changed, 2 insertions(+), 13 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 71e7d050c0e19..990b7d5dc5130 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -88,12 +88,6 @@ tests: - class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT method: testMinVersionAsOldVersion issue: https://github.com/elastic/elasticsearch/issues/109454 -- class: org.elasticsearch.xpack.esql.tree.EsqlNodeSubclassTests - method: testReplaceChildren {class org.elasticsearch.xpack.esql.expression.function.aggregate.ToPartial} - issue: https://github.com/elastic/elasticsearch/issues/110310 -- class: org.elasticsearch.xpack.esql.tree.EsqlNodeSubclassTests - method: testInfoParameters {class org.elasticsearch.xpack.esql.expression.function.aggregate.ToPartial} - issue: https://github.com/elastic/elasticsearch/issues/110310 - class: org.elasticsearch.search.vectors.ExactKnnQueryBuilderTests method: testToQuery issue: https://github.com/elastic/elasticsearch/issues/110357 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java index f94c8e0508cd7..c1da400185944 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java @@ -65,12 +65,7 @@ public class ToPartial extends AggregateFunction implements ToAggregator { private final Expression function; - public ToPartial(Source source, AggregateFunction function) { - super(source, function.field(), List.of(function)); - this.function = function; - } - - private ToPartial(Source source, Expression field, Expression function) { + public ToPartial(Source source, Expression field, Expression function) { super(source, field, List.of(function)); this.function = function; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java index 88486bcb864dc..1e222823ebf2b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java @@ -150,7 +150,7 @@ LogicalPlan translate(Aggregate metrics) { if (changed.get()) { secondPassAggs.add(new Alias(alias.source(), alias.name(), null, outerAgg, agg.id())); } else { - var toPartial = new Alias(agg.source(), alias.name(), new ToPartial(agg.source(), af)); + var toPartial = new Alias(agg.source(), alias.name(), new ToPartial(agg.source(), af.field(), af)); var fromPartial = new FromPartial(agg.source(), toPartial.toAttribute(), af); firstPassAggs.add(toPartial); secondPassAggs.add(new Alias(alias.source(), alias.name(), null, fromPartial, alias.id())); From 6e99d5def36277860cac3477036efc60a40a4a2d Mon Sep 17 00:00:00 2001 From: Parker Timmins Date: Fri, 5 Jul 2024 15:39:25 -0600 Subject: [PATCH 013/406] Deprecate using slm privileges to access ilm (#110540) (#110550) Currently, read_slm privilege grants access to get the ILM status, and manage_slm 
grants access to start/stop ILM. This access will be removed in the future, but needs to be deprecated before removal. Add deprecation warning to the read_slm and manage_slm docs. --- docs/changelog/110540.yaml | 16 ++++++++++++++++ .../security/authorization/privileges.asciidoc | 10 +++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/110540.yaml diff --git a/docs/changelog/110540.yaml b/docs/changelog/110540.yaml new file mode 100644 index 0000000000000..5e4994da80704 --- /dev/null +++ b/docs/changelog/110540.yaml @@ -0,0 +1,16 @@ +pr: 110540 +summary: Deprecate using slm privileges to access ilm +area: ILM+SLM +type: deprecation +issues: [] +deprecation: + title: Deprecate using slm privileges to access ilm + area: REST API + details: The `read_slm` privilege can get the ILM status, and + the `manage_slm` privilege can start and stop ILM. Access to these + APIs should be granted using the `read_ilm` and `manage_ilm` privileges + instead. Access to ILM APIs will be removed from SLM privileges in + a future major release, and is now deprecated. + impact: Users that need access to the ILM status API should now + use the `read_ilm` privilege. Users that need to start and stop ILM, + should use the `manage_ilm` privilege. diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index cc44c97a08129..44897baa8cb4a 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -2,7 +2,7 @@ === Security privileges :frontmatter-description: A list of privileges that can be assigned to user roles. :frontmatter-tags-products: [elasticsearch] -:frontmatter-tags-content-type: [reference] +:frontmatter-tags-content-type: [reference] :frontmatter-tags-user-goals: [secure] This section lists the privileges that you can assign to a role. @@ -198,6 +198,10 @@ All {slm} ({slm-init}) actions, including creating and updating policies and starting and stopping {slm-init}. + This privilege is not available in {serverless-full}. ++ +deprecated:[8.15] Also grants the permission to start and stop {Ilm}, using +the {ref}/ilm-start.html[ILM start] and {ref}/ilm-stop.html[ILM stop] APIs. +In a future major release, this privilege will not grant any {Ilm} permissions. `manage_token`:: All security-related operations on tokens that are generated by the {es} Token @@ -285,6 +289,10 @@ All read-only {slm-init} actions, such as getting policies and checking the {slm-init} status. + This privilege is not available in {serverless-full}. ++ +deprecated:[8.15] Also grants the permission to get the {Ilm} status, using +the {ref}/ilm-get-status.html[ILM get status API]. In a future major release, +this privilege will not grant any {Ilm} permissions. `read_security`:: All read-only security-related operations, such as getting users, user profiles, From e24b0c9b5b3cc7e2fb44e02ea1a66af14b0ab080 Mon Sep 17 00:00:00 2001 From: "Mark J. 
Hoy" Date: Fri, 5 Jul 2024 18:18:53 -0400 Subject: [PATCH 014/406] [Inference API] Add Amazon Bedrock support to Inference API (#110248) (#110545) * Initial commit; setup Gradle; start service * initial commit * minor cleanups, builds green; needs tests * bug fixes; tested working embeddings & completion * use custom json builder for embeddings request * Ensure auto-close; fix forbidden API * start of adding unit tests; abstraction layers * adding additional tests; cleanups * add requests unit tests * all tests created * fix cohere embeddings response * fix cohere embeddings response * fix lint * better test coverage for secrets; inference client * update thread-safe syncs; make dims/tokens + int * add tests for dims and max tokens positive integer * use requireNonNull;override settings type;cleanups * use r/w lock for client cache * remove client reference counting * update locking in cache; client errors; noop doc * remove extra block in internalGetOrCreateClient * remove duplicate dependencies; cleanup * add fxn to get default embeddings similarity * use async calls to Amazon Bedrock; cleanups * use Clock in cache; simplify locking; cleanups * cleanups around executor; remove some instanceof * cleanups; use EmbeddingRequestChunker * move max chunk size to constants * oof - swapped transport vers w/ master node req * use XContent instead of Jackson JsonFactory * remove gradle versions; do not allow dimensions --- gradle/verification-metadata.xml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + x-pack/plugin/inference/build.gradle | 29 +- .../licenses/aws-java-sdk-LICENSE.txt | 63 + .../licenses/aws-java-sdk-NOTICE.txt | 15 + .../inference/licenses/jaxb-LICENSE.txt | 274 ++++ .../plugin/inference/licenses/jaxb-NOTICE.txt | 1 + .../inference/licenses/joda-time-LICENSE.txt | 202 +++ .../inference/licenses/joda-time-NOTICE.txt | 5 + .../inference/src/main/java/module-info.java | 5 + .../InferenceNamedWriteablesProvider.java | 40 + .../xpack/inference/InferencePlugin.java | 7 + .../AmazonBedrockActionCreator.java | 56 + .../AmazonBedrockActionVisitor.java | 20 + .../AmazonBedrockChatCompletionAction.java | 47 + .../AmazonBedrockEmbeddingsAction.java | 48 + .../AmazonBedrockBaseClient.java | 37 + .../AmazonBedrockChatCompletionExecutor.java | 43 + .../amazonbedrock/AmazonBedrockClient.java | 29 + .../AmazonBedrockClientCache.java | 19 + .../AmazonBedrockEmbeddingsExecutor.java | 44 + ...AmazonBedrockExecuteOnlyRequestSender.java | 124 ++ .../amazonbedrock/AmazonBedrockExecutor.java | 68 + .../AmazonBedrockInferenceClient.java | 166 +++ .../AmazonBedrockInferenceClientCache.java | 137 ++ .../AmazonBedrockRequestSender.java | 126 ++ ...onBedrockChatCompletionRequestManager.java | 65 + ...AmazonBedrockEmbeddingsRequestManager.java | 74 ++ .../AmazonBedrockRequestExecutorService.java | 42 + .../sender/AmazonBedrockRequestManager.java | 54 + .../AmazonBedrockJsonBuilder.java | 30 + .../AmazonBedrockJsonWriter.java | 20 + .../amazonbedrock/AmazonBedrockRequest.java | 85 ++ .../amazonbedrock/NoOpHttpRequest.java | 20 + ...edrockAI21LabsCompletionRequestEntity.java | 63 + ...drockAnthropicCompletionRequestEntity.java | 70 + ...zonBedrockChatCompletionEntityFactory.java | 78 ++ .../AmazonBedrockChatCompletionRequest.java | 69 + ...nBedrockCohereCompletionRequestEntity.java | 70 + .../AmazonBedrockConverseRequestEntity.java | 18 + .../AmazonBedrockConverseUtils.java | 29 + ...zonBedrockMetaCompletionRequestEntity.java | 63 + ...BedrockMistralCompletionRequestEntity.java | 70 + 
...onBedrockTitanCompletionRequestEntity.java | 63 + ...nBedrockCohereEmbeddingsRequestEntity.java | 35 + .../AmazonBedrockEmbeddingsEntityFactory.java | 45 + .../AmazonBedrockEmbeddingsRequest.java | 99 ++ ...onBedrockTitanEmbeddingsRequestEntity.java | 31 + .../amazonbedrock/AmazonBedrockResponse.java | 15 + .../AmazonBedrockResponseHandler.java | 23 + .../AmazonBedrockResponseListener.java | 30 + .../AmazonBedrockChatCompletionResponse.java | 49 + ...nBedrockChatCompletionResponseHandler.java | 39 + ...BedrockChatCompletionResponseListener.java | 40 + .../AmazonBedrockEmbeddingsResponse.java | 132 ++ ...mazonBedrockEmbeddingsResponseHandler.java | 37 + ...azonBedrockEmbeddingsResponseListener.java | 38 + .../amazonbedrock/AmazonBedrockConstants.java | 27 + .../amazonbedrock/AmazonBedrockModel.java | 88 ++ .../amazonbedrock/AmazonBedrockProvider.java | 30 + .../AmazonBedrockProviderCapabilities.java | 102 ++ .../AmazonBedrockSecretSettings.java | 110 ++ .../amazonbedrock/AmazonBedrockService.java | 350 +++++ .../AmazonBedrockServiceSettings.java | 141 ++ .../AmazonBedrockChatCompletionModel.java | 83 ++ ...rockChatCompletionRequestTaskSettings.java | 90 ++ ...nBedrockChatCompletionServiceSettings.java | 93 ++ ...azonBedrockChatCompletionTaskSettings.java | 190 +++ .../AmazonBedrockEmbeddingsModel.java | 85 ++ ...mazonBedrockEmbeddingsServiceSettings.java | 220 ++++ .../plugin-metadata/plugin-security.policy | 8 +- .../AmazonBedrockActionCreatorTests.java | 175 +++ .../AmazonBedrockExecutorTests.java | 172 +++ ...mazonBedrockInferenceClientCacheTests.java | 108 ++ .../AmazonBedrockMockClientCache.java | 62 + ...AmazonBedrockMockExecuteRequestSender.java | 80 ++ .../AmazonBedrockMockInferenceClient.java | 133 ++ .../AmazonBedrockMockRequestSender.java | 91 ++ .../AmazonBedrockRequestSenderTests.java | 127 ++ ...kAI21LabsCompletionRequestEntityTests.java | 70 + ...AnthropicCompletionRequestEntityTests.java | 82 ++ ...ockCohereCompletionRequestEntityTests.java | 82 ++ .../AmazonBedrockConverseRequestUtils.java | 94 ++ ...drockMetaCompletionRequestEntityTests.java | 70 + ...ckMistralCompletionRequestEntityTests.java | 82 ++ ...rockTitanCompletionRequestEntityTests.java | 70 + ...ockCohereEmbeddingsRequestEntityTests.java | 25 + ...rockTitanEmbeddingsRequestEntityTests.java | 24 + .../AmazonBedrockSecretSettingsTests.java | 120 ++ .../AmazonBedrockServiceTests.java | 1131 +++++++++++++++++ ...AmazonBedrockChatCompletionModelTests.java | 221 ++++ ...hatCompletionRequestTaskSettingsTests.java | 107 ++ ...ockChatCompletionServiceSettingsTests.java | 131 ++ ...edrockChatCompletionTaskSettingsTests.java | 226 ++++ .../AmazonBedrockEmbeddingsModelTests.java | 81 ++ ...BedrockEmbeddingsServiceSettingsTests.java | 404 ++++++ 96 files changed, 8790 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/inference/licenses/aws-java-sdk-LICENSE.txt create mode 100644 x-pack/plugin/inference/licenses/aws-java-sdk-NOTICE.txt create mode 100644 x-pack/plugin/inference/licenses/jaxb-LICENSE.txt create mode 100644 x-pack/plugin/inference/licenses/jaxb-NOTICE.txt create mode 100644 x-pack/plugin/inference/licenses/joda-time-LICENSE.txt create mode 100644 x-pack/plugin/inference/licenses/joda-time-NOTICE.txt create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreator.java create mode 100644 
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionVisitor.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockChatCompletionAction.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockEmbeddingsAction.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockChatCompletionExecutor.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClient.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClientCache.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockEmbeddingsExecutor.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutor.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSender.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestExecutorService.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestManager.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonBuilder.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonWriter.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockRequest.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/NoOpHttpRequest.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntity.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntity.java create mode 100644 
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionEntityFactory.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionRequest.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntity.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestEntity.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseUtils.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntity.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntity.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntity.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntity.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsEntityFactory.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsRequest.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntity.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponse.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseHandler.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseListener.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponse.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseHandler.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseListener.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponse.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseHandler.java create mode 100644 
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseListener.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockConstants.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockModel.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProvider.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProviderCapabilities.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceSettings.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettings.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettings.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettings.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModel.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettings.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutorTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockClientCache.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockExecuteRequestSender.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockInferenceClient.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java create mode 100644 
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntityTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntityTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntityTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestUtils.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntityTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntityTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntityTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntityTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntityTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettingsTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModelTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettingsTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettingsTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettingsTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModelTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettingsTests.java diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index cd408ba75aa10..02313c5ed82a2 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -84,6 +84,11 @@ + + + + + diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 2004c6fda8ce5..ff50d1513d28a 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -208,6 +208,7 @@ static 
TransportVersion def(int id) { public static final TransportVersion TEXT_SIMILARITY_RERANKER_RETRIEVER = def(8_699_00_0); public static final TransportVersion ML_INFERENCE_GOOGLE_VERTEX_AI_RERANKING_ADDED = def(8_700_00_0); public static final TransportVersion VERSIONED_MASTER_NODE_REQUESTS = def(8_701_00_0); + public static final TransportVersion ML_INFERENCE_AMAZON_BEDROCK_ADDED = def(8_702_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/inference/build.gradle b/x-pack/plugin/inference/build.gradle index 41ca9966c1336..beeec94f21ebf 100644 --- a/x-pack/plugin/inference/build.gradle +++ b/x-pack/plugin/inference/build.gradle @@ -27,6 +27,10 @@ base { archivesName = 'x-pack-inference' } +versions << [ + 'awsbedrockruntime': '1.12.740' +] + dependencies { implementation project(path: ':libs:elasticsearch-logging') compileOnly project(":server") @@ -53,10 +57,19 @@ dependencies { implementation 'com.google.http-client:google-http-client-appengine:1.42.3' implementation 'com.google.http-client:google-http-client-jackson2:1.42.3' implementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" + implementation "com.fasterxml.jackson:jackson-bom:${versions.jackson}" implementation 'com.google.api:gax-httpjson:0.105.1' implementation 'io.grpc:grpc-context:1.49.2' implementation 'io.opencensus:opencensus-api:0.31.1' implementation 'io.opencensus:opencensus-contrib-http-util:0.31.1' + implementation "com.amazonaws:aws-java-sdk-bedrockruntime:${versions.awsbedrockruntime}" + implementation "com.amazonaws:aws-java-sdk-core:${versions.aws}" + implementation "com.amazonaws:jmespath-java:${versions.aws}" + implementation "joda-time:joda-time:2.10.10" + implementation 'javax.xml.bind:jaxb-api:2.2.2' } tasks.named("dependencyLicenses").configure { @@ -66,6 +79,9 @@ tasks.named("dependencyLicenses").configure { mapping from: /protobuf.*/, to: 'protobuf' mapping from: /proto-google.*/, to: 'proto-google' mapping from: /jackson.*/, to: 'jackson' + mapping from: /aws-java-sdk-.*/, to: 'aws-java-sdk' + mapping from: /jmespath-java.*/, to: 'aws-java-sdk' + mapping from: /jaxb-.*/, to: 'jaxb' } tasks.named("thirdPartyAudit").configure { @@ -199,10 +215,21 @@ tasks.named("thirdPartyAudit").configure { 'com.google.appengine.api.urlfetch.HTTPRequest', 'com.google.appengine.api.urlfetch.HTTPResponse', 'com.google.appengine.api.urlfetch.URLFetchService', - 'com.google.appengine.api.urlfetch.URLFetchServiceFactory' + 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', + 'software.amazon.ion.IonReader', + 'software.amazon.ion.IonSystem', + 'software.amazon.ion.IonType', + 'software.amazon.ion.IonWriter', + 'software.amazon.ion.Timestamp', + 'software.amazon.ion.system.IonBinaryWriterBuilder', + 'software.amazon.ion.system.IonSystemBuilder', + 'software.amazon.ion.system.IonTextWriterBuilder', + 'software.amazon.ion.system.IonWriterBuilder', + 'javax.activation.DataHandler' ) } tasks.named('yamlRestTest') { usesDefaultDistribution() } + diff --git a/x-pack/plugin/inference/licenses/aws-java-sdk-LICENSE.txt b/x-pack/plugin/inference/licenses/aws-java-sdk-LICENSE.txt new file mode 100644 index 0000000000000..98d1f9319f374 --- /dev/null +++ b/x-pack/plugin/inference/licenses/aws-java-sdk-LICENSE.txt @@ -0,0 +1,63 @@ 
+Apache License +Version 2.0, January 2004 + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + 1. You must give any other recipients of the Work or Derivative Works a copy of this License; and + 2. You must cause any modified files to carry prominent notices stating that You changed the files; and + 3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + 4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +Note: Other license terms may apply to certain, identified software files contained within or distributed with the accompanying software if such terms are included in the directory containing the accompanying software. Such other license terms will then apply in lieu of the terms of the software license above. + +JSON processing code subject to the JSON License from JSON.org: + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +The Software shall be used for Good, not Evil. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/x-pack/plugin/inference/licenses/aws-java-sdk-NOTICE.txt b/x-pack/plugin/inference/licenses/aws-java-sdk-NOTICE.txt new file mode 100644 index 0000000000000..565bd6085c71a --- /dev/null +++ b/x-pack/plugin/inference/licenses/aws-java-sdk-NOTICE.txt @@ -0,0 +1,15 @@ +AWS SDK for Java +Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +This product includes software developed by +Amazon Technologies, Inc (http://www.amazon.com/). + +********************** +THIRD PARTY COMPONENTS +********************** +This software includes third party software subject to the following copyrights: +- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty. +- JSON parsing and utility functions from JSON.org - Copyright 2002 JSON.org. +- PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. + +The licenses for these third party components are included in LICENSE.txt diff --git a/x-pack/plugin/inference/licenses/jaxb-LICENSE.txt b/x-pack/plugin/inference/licenses/jaxb-LICENSE.txt new file mode 100644 index 0000000000000..833a843cfeee1 --- /dev/null +++ b/x-pack/plugin/inference/licenses/jaxb-LICENSE.txt @@ -0,0 +1,274 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. + + 1.4. "Executable" means the Covered Software in any form other than Source Code. + + 1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable form of any of the following: + + A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; + + B. Any new file that contains any part of the Original Software or previous Modification; or + + C. Any new file that is contributed or otherwise made available under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. + + 1.12. 
"Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. + + 2.2. Contributor Grant. + + Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. 
+ + (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. + + Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. 
You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. + + 4.2. Effect of New Versions. + + You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. + + 6.2. 
If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license. + + 6.4. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. ? 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. + +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject matter hereof. 
If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. + +---------- +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) +The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. + + + + +The GNU General Public License (GPL) Version 2, June 1991 + + +Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. + +Preamble + +The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. 
+ +To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. + +Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. + +Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and modification follow. + + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. + +1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. + +2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. + + c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. + +3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. + +If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. + +5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. + +6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. + +7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. 
+ +It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. + +This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. + +8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. + +9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. + +10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. + +NO WARRANTY + +11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +END OF TERMS AND CONDITIONS + + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. + + One line to give the program's name and a brief idea of what it does. + + Copyright (C) + + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. + + signature of Ty Coon, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. 
+ + +"CLASSPATH" EXCEPTION TO THE GPL VERSION 2 + +Certain source files distributed by Oracle are subject to the following clarification and special exception to the GPL Version 2, but only where Oracle has expressly included in the particular source file's header the words "Oracle designates this particular file as subject to the "Classpath" exception as provided by Oracle in the License file that accompanied this code." + +Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination. + +As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. diff --git a/x-pack/plugin/inference/licenses/jaxb-NOTICE.txt b/x-pack/plugin/inference/licenses/jaxb-NOTICE.txt new file mode 100644 index 0000000000000..8d1c8b69c3fce --- /dev/null +++ b/x-pack/plugin/inference/licenses/jaxb-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/x-pack/plugin/inference/licenses/joda-time-LICENSE.txt b/x-pack/plugin/inference/licenses/joda-time-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/inference/licenses/joda-time-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/inference/licenses/joda-time-NOTICE.txt b/x-pack/plugin/inference/licenses/joda-time-NOTICE.txt new file mode 100644 index 0000000000000..dffbcf31cacf6 --- /dev/null +++ b/x-pack/plugin/inference/licenses/joda-time-NOTICE.txt @@ -0,0 +1,5 @@ +============================================================================= += NOTICE file corresponding to section 4d of the Apache License Version 2.0 = +============================================================================= +This product includes software developed by +Joda.org (http://www.joda.org/). 
diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index aa907a236884a..a7e5718a0920e 100644 --- a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -20,8 +20,13 @@ requires org.apache.lucene.join; requires com.ibm.icu; requires com.google.auth.oauth2; + requires com.google.auth; requires com.google.api.client; requires com.google.gson; + requires aws.java.sdk.bedrockruntime; + requires aws.java.sdk.core; + requires com.fasterxml.jackson.databind; + requires org.joda.time; exports org.elasticsearch.xpack.inference.action; exports org.elasticsearch.xpack.inference.registry; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java index f3799b824fc0e..f8ce9df1fb194 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java @@ -24,6 +24,10 @@ import org.elasticsearch.xpack.core.inference.results.LegacyTextEmbeddingResults; import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionServiceSettings; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionTaskSettings; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionServiceSettings; import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionTaskSettings; import org.elasticsearch.xpack.inference.services.azureaistudio.completion.AzureAiStudioChatCompletionServiceSettings; @@ -122,10 +126,46 @@ public static List getNamedWriteables() { addMistralNamedWriteables(namedWriteables); addCustomElandWriteables(namedWriteables); addAnthropicNamedWritables(namedWriteables); + addAmazonBedrockNamedWriteables(namedWriteables); return namedWriteables; } + private static void addAmazonBedrockNamedWriteables(List namedWriteables) { + namedWriteables.add( + new NamedWriteableRegistry.Entry( + AmazonBedrockSecretSettings.class, + AmazonBedrockSecretSettings.NAME, + AmazonBedrockSecretSettings::new + ) + ); + + namedWriteables.add( + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + AmazonBedrockEmbeddingsServiceSettings.NAME, + AmazonBedrockEmbeddingsServiceSettings::new + ) + ); + + // no task settings for Amazon Bedrock Embeddings + + namedWriteables.add( + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + AmazonBedrockChatCompletionServiceSettings.NAME, + AmazonBedrockChatCompletionServiceSettings::new + ) + ); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + TaskSettings.class, + AmazonBedrockChatCompletionTaskSettings.NAME, + AmazonBedrockChatCompletionTaskSettings::new + ) + ); + } + private static void addMistralNamedWriteables(List namedWriteables) { namedWriteables.add( new NamedWriteableRegistry.Entry( 
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 1db5b4135ee94..1c388f7399260 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -53,6 +53,7 @@ import org.elasticsearch.xpack.inference.action.TransportPutInferenceModelAction; import org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilter; import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockRequestSender; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpSettings; import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; @@ -70,6 +71,7 @@ import org.elasticsearch.xpack.inference.rest.RestInferenceAction; import org.elasticsearch.xpack.inference.rest.RestPutInferenceModelAction; import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockService; import org.elasticsearch.xpack.inference.services.anthropic.AnthropicService; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioService; import org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiService; @@ -117,6 +119,7 @@ public class InferencePlugin extends Plugin implements ActionPlugin, ExtensibleP private final Settings settings; private final SetOnce httpFactory = new SetOnce<>(); + private final SetOnce amazonBedrockFactory = new SetOnce<>(); private final SetOnce serviceComponents = new SetOnce<>(); private final SetOnce inferenceServiceRegistry = new SetOnce<>(); @@ -170,6 +173,9 @@ public Collection createComponents(PluginServices services) { var httpRequestSenderFactory = new HttpRequestSender.Factory(serviceComponents.get(), httpClientManager, services.clusterService()); httpFactory.set(httpRequestSenderFactory); + var amazonBedrockRequestSenderFactory = new AmazonBedrockRequestSender.Factory(serviceComponents.get(), services.clusterService()); + amazonBedrockFactory.set(amazonBedrockRequestSenderFactory); + ModelRegistry modelRegistry = new ModelRegistry(services.client()); if (inferenceServiceExtensions == null) { @@ -209,6 +215,7 @@ public List getInferenceServiceFactories() { context -> new GoogleVertexAiService(httpFactory.get(), serviceComponents.get()), context -> new MistralService(httpFactory.get(), serviceComponents.get()), context -> new AnthropicService(httpFactory.get(), serviceComponents.get()), + context -> new AmazonBedrockService(httpFactory.get(), amazonBedrockFactory.get(), serviceComponents.get()), ElasticsearchInternalService::new ); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreator.java new file mode 100644 index 0000000000000..5f9fc532e33b2 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreator.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.amazonbedrock; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockChatCompletionRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockEmbeddingsRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; + +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; + +public class AmazonBedrockActionCreator implements AmazonBedrockActionVisitor { + private final Sender sender; + private final ServiceComponents serviceComponents; + private final TimeValue timeout; + + public AmazonBedrockActionCreator(Sender sender, ServiceComponents serviceComponents, @Nullable TimeValue timeout) { + this.sender = Objects.requireNonNull(sender); + this.serviceComponents = Objects.requireNonNull(serviceComponents); + this.timeout = timeout; + } + + @Override + public ExecutableAction create(AmazonBedrockEmbeddingsModel embeddingsModel, Map taskSettings) { + var overriddenModel = AmazonBedrockEmbeddingsModel.of(embeddingsModel, taskSettings); + var requestManager = new AmazonBedrockEmbeddingsRequestManager( + overriddenModel, + serviceComponents.truncator(), + serviceComponents.threadPool(), + timeout + ); + var errorMessage = constructFailedToSendRequestMessage(null, "Amazon Bedrock embeddings"); + return new AmazonBedrockEmbeddingsAction(sender, requestManager, errorMessage); + } + + @Override + public ExecutableAction create(AmazonBedrockChatCompletionModel completionModel, Map taskSettings) { + var overriddenModel = AmazonBedrockChatCompletionModel.of(completionModel, taskSettings); + var requestManager = new AmazonBedrockChatCompletionRequestManager(overriddenModel, serviceComponents.threadPool(), timeout); + var errorMessage = constructFailedToSendRequestMessage(null, "Amazon Bedrock completion"); + return new AmazonBedrockChatCompletionAction(sender, requestManager, errorMessage); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionVisitor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionVisitor.java new file mode 100644 index 0000000000000..b540d030eb3f7 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionVisitor.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.amazonbedrock; + +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; + +import java.util.Map; + +public interface AmazonBedrockActionVisitor { + ExecutableAction create(AmazonBedrockEmbeddingsModel embeddingsModel, Map taskSettings); + + ExecutableAction create(AmazonBedrockChatCompletionModel completionModel, Map taskSettings); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockChatCompletionAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockChatCompletionAction.java new file mode 100644 index 0000000000000..9d3c39d3ac4d9 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockChatCompletionAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.RequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; + +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; + +public class AmazonBedrockChatCompletionAction implements ExecutableAction { + private final Sender sender; + private final RequestManager requestManager; + private final String errorMessage; + + public AmazonBedrockChatCompletionAction(Sender sender, RequestManager requestManager, String errorMessage) { + this.sender = Objects.requireNonNull(sender); + this.requestManager = Objects.requireNonNull(requestManager); + this.errorMessage = Objects.requireNonNull(errorMessage); + } + + @Override + public void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener listener) { + try { + ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); + + sender.send(requestManager, inferenceInputs, timeout, wrappedListener); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } catch (Exception e) { + listener.onFailure(createInternalServerError(e, errorMessage)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockEmbeddingsAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockEmbeddingsAction.java new file mode 100644 index 
0000000000000..3f8be0c3cccbe --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockEmbeddingsAction.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.RequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; + +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; + +public class AmazonBedrockEmbeddingsAction implements ExecutableAction { + + private final Sender sender; + private final RequestManager requestManager; + private final String errorMessage; + + public AmazonBedrockEmbeddingsAction(Sender sender, RequestManager requestManager, String errorMessage) { + this.sender = Objects.requireNonNull(sender); + this.requestManager = Objects.requireNonNull(requestManager); + this.errorMessage = Objects.requireNonNull(errorMessage); + } + + @Override + public void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener listener) { + try { + ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); + + sender.send(requestManager, inferenceInputs, timeout, wrappedListener); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } catch (Exception e) { + listener.onFailure(createInternalServerError(e, errorMessage)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java new file mode 100644 index 0000000000000..f9e403582a0ec --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.time.Clock; +import java.util.Objects; + +public abstract class AmazonBedrockBaseClient implements AmazonBedrockClient { + protected final Integer modelKeysAndRegionHashcode; + protected Clock clock = Clock.systemUTC(); + + protected AmazonBedrockBaseClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + Objects.requireNonNull(model); + this.modelKeysAndRegionHashcode = getModelKeysAndRegionHashcode(model, timeout); + } + + public static Integer getModelKeysAndRegionHashcode(AmazonBedrockModel model, @Nullable TimeValue timeout) { + var secretSettings = model.getSecretSettings(); + var serviceSettings = model.getServiceSettings(); + return Objects.hash(secretSettings.accessKey, secretSettings.secretKey, serviceSettings.region(), timeout); + } + + public final void setClock(Clock clock) { + this.clock = clock; + } + + abstract void close(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockChatCompletionExecutor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockChatCompletionExecutor.java new file mode 100644 index 0000000000000..a4e0c399517c1 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockChatCompletionExecutor.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion.AmazonBedrockChatCompletionResponseListener; + +import java.util.function.Supplier; + +public class AmazonBedrockChatCompletionExecutor extends AmazonBedrockExecutor { + private final AmazonBedrockChatCompletionRequest chatCompletionRequest; + + protected AmazonBedrockChatCompletionExecutor( + AmazonBedrockChatCompletionRequest request, + AmazonBedrockResponseHandler responseHandler, + Logger logger, + Supplier hasRequestCompletedFunction, + ActionListener inferenceResultsListener, + AmazonBedrockClientCache clientCache + ) { + super(request, responseHandler, logger, hasRequestCompletedFunction, inferenceResultsListener, clientCache); + this.chatCompletionRequest = request; + } + + @Override + protected void executeClientRequest(AmazonBedrockBaseClient awsBedrockClient) { + var chatCompletionResponseListener = new AmazonBedrockChatCompletionResponseListener( + chatCompletionRequest, + responseHandler, + inferenceResultsListener + ); + chatCompletionRequest.executeChatCompletionRequest(awsBedrockClient, chatCompletionResponseListener); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClient.java new file mode 100644 index 0000000000000..812e76129c420 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClient.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.ConverseResult; +import com.amazonaws.services.bedrockruntime.model.InvokeModelRequest; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; + +import java.time.Instant; + +public interface AmazonBedrockClient { + void converse(ConverseRequest converseRequest, ActionListener responseListener) throws ElasticsearchException; + + void invokeModel(InvokeModelRequest invokeModelRequest, ActionListener responseListener) + throws ElasticsearchException; + + boolean isExpired(Instant currentTimestampMs); + + void resetExpiration(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClientCache.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClientCache.java new file mode 100644 index 0000000000000..e6bb99620b581 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClientCache.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.io.Closeable; +import java.io.IOException; + +public interface AmazonBedrockClientCache extends Closeable { + AmazonBedrockBaseClient getOrCreateClient(AmazonBedrockModel model, @Nullable TimeValue timeout) throws IOException; +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockEmbeddingsExecutor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockEmbeddingsExecutor.java new file mode 100644 index 0000000000000..6da3f86e0909a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockEmbeddingsExecutor.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings.AmazonBedrockEmbeddingsResponseListener; + +import java.util.function.Supplier; + +public class AmazonBedrockEmbeddingsExecutor extends AmazonBedrockExecutor { + + private final AmazonBedrockEmbeddingsRequest embeddingsRequest; + + protected AmazonBedrockEmbeddingsExecutor( + AmazonBedrockEmbeddingsRequest request, + AmazonBedrockResponseHandler responseHandler, + Logger logger, + Supplier hasRequestCompletedFunction, + ActionListener inferenceResultsListener, + AmazonBedrockClientCache clientCache + ) { + super(request, responseHandler, logger, hasRequestCompletedFunction, inferenceResultsListener, clientCache); + this.embeddingsRequest = request; + } + + @Override + protected void executeClientRequest(AmazonBedrockBaseClient awsBedrockClient) { + var embeddingsResponseListener = new AmazonBedrockEmbeddingsResponseListener( + embeddingsRequest, + responseHandler, + inferenceResultsListener + ); + embeddingsRequest.executeEmbeddingsRequest(awsBedrockClient, embeddingsResponseListener); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java new file mode 100644 index 0000000000000..a08acab655936 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.core.Strings.format; + +/** + * The AWS SDK uses its own internal retrier and timeout values on the client + */ +public class AmazonBedrockExecuteOnlyRequestSender implements RequestSender { + + protected final AmazonBedrockClientCache clientCache; + private final ThrottlerManager throttleManager; + + public AmazonBedrockExecuteOnlyRequestSender(AmazonBedrockClientCache clientCache, ThrottlerManager throttlerManager) { + this.clientCache = Objects.requireNonNull(clientCache); + this.throttleManager = Objects.requireNonNull(throttlerManager); + } + + @Override + public void send( + Logger logger, + Request request, + HttpClientContext context, + Supplier hasRequestTimedOutFunction, + ResponseHandler responseHandler, + ActionListener listener + ) { + if (request instanceof AmazonBedrockRequest awsRequest && responseHandler instanceof AmazonBedrockResponseHandler awsResponse) { + try { + var executor = createExecutor(awsRequest, awsResponse, logger, hasRequestTimedOutFunction, listener); + + // the run method will call the listener to return the proper value + executor.run(); + return; + } catch (Exception e) { + logException(logger, request, e); + listener.onFailure(wrapWithElasticsearchException(e, request.getInferenceEntityId())); + } + } + + listener.onFailure(new ElasticsearchException("Amazon Bedrock request was not the correct type")); + } + + // allow this to be overridden for testing + protected AmazonBedrockExecutor createExecutor( + AmazonBedrockRequest awsRequest, + AmazonBedrockResponseHandler awsResponse, + Logger logger, + Supplier hasRequestTimedOutFunction, + ActionListener listener + ) { + switch (awsRequest.taskType()) { + case COMPLETION -> { + return new AmazonBedrockChatCompletionExecutor( + (AmazonBedrockChatCompletionRequest) awsRequest, + awsResponse, + logger, + hasRequestTimedOutFunction, + listener, + clientCache + ); + } + case TEXT_EMBEDDING -> { + return new AmazonBedrockEmbeddingsExecutor( + (AmazonBedrockEmbeddingsRequest) awsRequest, + awsResponse, + logger, + hasRequestTimedOutFunction, + listener, + clientCache + ); + } + default -> { + throw new UnsupportedOperationException("Unsupported task type [" + awsRequest.taskType() + "] for Amazon Bedrock request"); + } + } + } + + private void logException(Logger logger, Request request, Exception 
exception) { + var causeException = ExceptionsHelper.unwrapCause(exception); + + throttleManager.warn( + logger, + format("Failed while sending request from inference entity id [%s] of type [amazonbedrock]", request.getInferenceEntityId()), + causeException + ); + } + + private Exception wrapWithElasticsearchException(Exception e, String inferenceEntityId) { + return new ElasticsearchException( + format("Amazon Bedrock client failed to send request from inference entity id [%s]", inferenceEntityId), + e + ); + } + + public void shutdown() throws IOException { + this.clientCache.close(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutor.java new file mode 100644 index 0000000000000..fa220ee5d2831 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutor.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.util.Objects; +import java.util.function.Supplier; + +public abstract class AmazonBedrockExecutor implements Runnable { + protected final AmazonBedrockModel baseModel; + protected final AmazonBedrockResponseHandler responseHandler; + protected final Logger logger; + protected final AmazonBedrockRequest request; + protected final Supplier hasRequestCompletedFunction; + protected final ActionListener inferenceResultsListener; + protected final AmazonBedrockClientCache clientCache; + + protected AmazonBedrockExecutor( + AmazonBedrockRequest request, + AmazonBedrockResponseHandler responseHandler, + Logger logger, + Supplier hasRequestCompletedFunction, + ActionListener inferenceResultsListener, + AmazonBedrockClientCache clientCache + ) { + this.request = Objects.requireNonNull(request); + this.responseHandler = Objects.requireNonNull(responseHandler); + this.logger = Objects.requireNonNull(logger); + this.hasRequestCompletedFunction = Objects.requireNonNull(hasRequestCompletedFunction); + this.inferenceResultsListener = Objects.requireNonNull(inferenceResultsListener); + this.clientCache = Objects.requireNonNull(clientCache); + this.baseModel = request.model(); + } + + @Override + public void run() { + if (hasRequestCompletedFunction.get()) { + // has already been run + return; + } + + var inferenceEntityId = baseModel.getInferenceEntityId(); + + try { + var awsBedrockClient = clientCache.getOrCreateClient(baseModel, request.timeout()); + executeClientRequest(awsBedrockClient); + } catch (Exception e) { + var errorMessage = Strings.format("Failed to send request from inference 
entity id [%s]", inferenceEntityId); + logger.warn(errorMessage, e); + inferenceResultsListener.onFailure(new ElasticsearchException(errorMessage, e)); + } + } + + protected abstract void executeClientRequest(AmazonBedrockBaseClient awsBedrockClient); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java new file mode 100644 index 0000000000000..c3d458925268c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.services.bedrockruntime.AmazonBedrockRuntimeAsync; +import com.amazonaws.services.bedrockruntime.AmazonBedrockRuntimeAsyncClientBuilder; +import com.amazonaws.services.bedrockruntime.model.AmazonBedrockRuntimeException; +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.ConverseResult; +import com.amazonaws.services.bedrockruntime.model.InvokeModelRequest; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.security.AccessController; +import java.security.PrivilegedExceptionAction; +import java.time.Duration; +import java.time.Instant; +import java.util.Objects; + +/** + * Not marking this as "final" so we can subclass it for mocking + */ +public class AmazonBedrockInferenceClient extends AmazonBedrockBaseClient { + + // package-private for testing + static final int CLIENT_CACHE_EXPIRY_MINUTES = 5; + private static final int DEFAULT_CLIENT_TIMEOUT_MS = 10000; + + private final AmazonBedrockRuntimeAsync internalClient; + private volatile Instant expiryTimestamp; + + public static AmazonBedrockBaseClient create(AmazonBedrockModel model, @Nullable TimeValue timeout) { + try { + return new AmazonBedrockInferenceClient(model, timeout); + } catch (Exception e) { + throw new ElasticsearchException("Failed to create Amazon Bedrock Client", e); + } + } + + protected AmazonBedrockInferenceClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + super(model, timeout); + this.internalClient = createAmazonBedrockClient(model, timeout); + setExpiryTimestamp(); + } + + @Override + public void converse(ConverseRequest converseRequest, ActionListener responseListener) throws ElasticsearchException { + try { + var responseFuture = internalClient.converseAsync(converseRequest); + responseListener.onResponse(responseFuture.get()); + } catch 
(AmazonBedrockRuntimeException amazonBedrockRuntimeException) { + responseListener.onFailure( + new ElasticsearchException( + Strings.format("AmazonBedrock converse failure: [%s]", amazonBedrockRuntimeException.getMessage()), + amazonBedrockRuntimeException + ) + ); + } catch (ElasticsearchException elasticsearchException) { + // just throw the exception if we have one + responseListener.onFailure(elasticsearchException); + } catch (Exception e) { + responseListener.onFailure(new ElasticsearchException("Amazon Bedrock client converse call failed", e)); + } + } + + @Override + public void invokeModel(InvokeModelRequest invokeModelRequest, ActionListener responseListener) + throws ElasticsearchException { + try { + var responseFuture = internalClient.invokeModelAsync(invokeModelRequest); + responseListener.onResponse(responseFuture.get()); + } catch (AmazonBedrockRuntimeException amazonBedrockRuntimeException) { + responseListener.onFailure( + new ElasticsearchException( + Strings.format("AmazonBedrock invoke model failure: [%s]", amazonBedrockRuntimeException.getMessage()), + amazonBedrockRuntimeException + ) + ); + } catch (ElasticsearchException elasticsearchException) { + // just throw the exception if we have one + responseListener.onFailure(elasticsearchException); + } catch (Exception e) { + responseListener.onFailure(new ElasticsearchException(e)); + } + } + + // allow this to be overridden for test mocks + protected AmazonBedrockRuntimeAsync createAmazonBedrockClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + var secretSettings = model.getSecretSettings(); + var credentials = new BasicAWSCredentials(secretSettings.accessKey.toString(), secretSettings.secretKey.toString()); + var credentialsProvider = new AWSStaticCredentialsProvider(credentials); + var clientConfig = timeout == null + ? 
new ClientConfiguration().withConnectionTimeout(DEFAULT_CLIENT_TIMEOUT_MS) + : new ClientConfiguration().withConnectionTimeout((int) timeout.millis()); + + var serviceSettings = model.getServiceSettings(); + + try { + SpecialPermission.check(); + AmazonBedrockRuntimeAsyncClientBuilder builder = AccessController.doPrivileged( + (PrivilegedExceptionAction) () -> AmazonBedrockRuntimeAsyncClientBuilder.standard() + .withCredentials(credentialsProvider) + .withRegion(serviceSettings.region()) + .withClientConfiguration(clientConfig) + ); + + return SocketAccess.doPrivileged(builder::build); + } catch (AmazonBedrockRuntimeException amazonBedrockRuntimeException) { + throw new ElasticsearchException( + Strings.format("failed to create AmazonBedrockRuntime client: [%s]", amazonBedrockRuntimeException.getMessage()), + amazonBedrockRuntimeException + ); + } catch (Exception e) { + throw new ElasticsearchException("failed to create AmazonBedrockRuntime client", e); + } + } + + private void setExpiryTimestamp() { + this.expiryTimestamp = clock.instant().plus(Duration.ofMinutes(CLIENT_CACHE_EXPIRY_MINUTES)); + } + + @Override + public boolean isExpired(Instant currentTimestampMs) { + Objects.requireNonNull(currentTimestampMs); + return currentTimestampMs.isAfter(expiryTimestamp); + } + + public void resetExpiration() { + setExpiryTimestamp(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AmazonBedrockInferenceClient that = (AmazonBedrockInferenceClient) o; + return Objects.equals(modelKeysAndRegionHashcode, that.modelKeysAndRegionHashcode); + } + + @Override + public int hashCode() { + return this.modelKeysAndRegionHashcode; + } + + // make this package-private so only the cache can close it + @Override + void close() { + internalClient.shutdown(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java new file mode 100644 index 0000000000000..e245365c214af --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.http.IdleConnectionReaper; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.io.IOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.BiFunction; + +public final class AmazonBedrockInferenceClientCache implements AmazonBedrockClientCache { + + private final BiFunction creator; + private final Map clientsCache = new ConcurrentHashMap<>(); + private final ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(); + + // not final for testing + private Clock clock; + + public AmazonBedrockInferenceClientCache( + BiFunction creator, + @Nullable Clock clock + ) { + this.creator = Objects.requireNonNull(creator); + this.clock = Objects.requireNonNullElse(clock, Clock.systemUTC()); + } + + public AmazonBedrockBaseClient getOrCreateClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + var returnClient = internalGetOrCreateClient(model, timeout); + flushExpiredClients(); + return returnClient; + } + + private AmazonBedrockBaseClient internalGetOrCreateClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + final Integer modelHash = AmazonBedrockInferenceClient.getModelKeysAndRegionHashcode(model, timeout); + cacheLock.readLock().lock(); + try { + return clientsCache.computeIfAbsent(modelHash, hashKey -> { + final AmazonBedrockBaseClient builtClient = creator.apply(model, timeout); + builtClient.setClock(clock); + builtClient.resetExpiration(); + return builtClient; + }); + } finally { + cacheLock.readLock().unlock(); + } + } + + private void flushExpiredClients() { + var currentTimestampMs = clock.instant(); + var expiredClients = new ArrayList>(); + + cacheLock.readLock().lock(); + try { + for (final Map.Entry client : clientsCache.entrySet()) { + if (client.getValue().isExpired(currentTimestampMs)) { + expiredClients.add(client); + } + } + + if (expiredClients.isEmpty()) { + return; + } + + cacheLock.readLock().unlock(); + cacheLock.writeLock().lock(); + try { + for (final Map.Entry client : expiredClients) { + var removed = clientsCache.remove(client.getKey()); + if (removed != null) { + removed.close(); + } + } + } finally { + cacheLock.readLock().lock(); + cacheLock.writeLock().unlock(); + } + } finally { + cacheLock.readLock().unlock(); + } + } + + @Override + public void close() throws IOException { + releaseCachedClients(); + } + + private void releaseCachedClients() { + // as we're closing and flushing all of these - we'll use a write lock + // across the whole operation to ensure this stays in sync + cacheLock.writeLock().lock(); + try { + // ensure all the clients are closed before we clear + for (final AmazonBedrockBaseClient client : clientsCache.values()) { + client.close(); + } + + // clear previously cached clients, they will be build lazily + clientsCache.clear(); + } finally { + cacheLock.writeLock().unlock(); + } + + // shutdown IdleConnectionReaper background thread + // it will be restarted on new client usage + IdleConnectionReaper.shutdown(); + } + + // used for testing + int clientCount() { + cacheLock.readLock().lock(); + try { + return clientsCache.size(); + } finally { + cacheLock.readLock().unlock(); + } + } + + // used 
for testing + void setClock(Clock newClock) { + this.clock = Objects.requireNonNull(newClock); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSender.java new file mode 100644 index 0000000000000..e23b0274ede26 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSender.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockRequestExecutorService; +import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettings; +import org.elasticsearch.xpack.inference.external.http.sender.RequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; + +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; + +public class AmazonBedrockRequestSender implements Sender { + + public static class Factory { + private final ServiceComponents serviceComponents; + private final ClusterService clusterService; + + public Factory(ServiceComponents serviceComponents, ClusterService clusterService) { + this.serviceComponents = Objects.requireNonNull(serviceComponents); + this.clusterService = Objects.requireNonNull(clusterService); + } + + public Sender createSender() { + var clientCache = new AmazonBedrockInferenceClientCache(AmazonBedrockInferenceClient::create, null); + return createSender(new AmazonBedrockExecuteOnlyRequestSender(clientCache, serviceComponents.throttlerManager())); + } + + Sender createSender(AmazonBedrockExecuteOnlyRequestSender requestSender) { + var sender = new AmazonBedrockRequestSender( + serviceComponents.threadPool(), + clusterService, + serviceComponents.settings(), + Objects.requireNonNull(requestSender) + ); + // ensure this is started + sender.start(); + return sender; + } + } + + private static final TimeValue START_COMPLETED_WAIT_TIME = TimeValue.timeValueSeconds(5); + + private final ThreadPool threadPool; + private final AmazonBedrockRequestExecutorService executorService; + private final AtomicBoolean started = new AtomicBoolean(false); + private final CountDownLatch startCompleted = new CountDownLatch(1); + + protected AmazonBedrockRequestSender( + 
ThreadPool threadPool, + ClusterService clusterService, + Settings settings, + AmazonBedrockExecuteOnlyRequestSender requestSender + ) { + this.threadPool = Objects.requireNonNull(threadPool); + executorService = new AmazonBedrockRequestExecutorService( + threadPool, + startCompleted, + new RequestExecutorServiceSettings(settings, clusterService), + requestSender + ); + } + + @Override + public void start() { + if (started.compareAndSet(false, true)) { + // The manager must be started before the executor service. That way we guarantee that the http client + // is ready prior to the service attempting to use the http client to send a request + threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(executorService::start); + waitForStartToComplete(); + } + } + + private void waitForStartToComplete() { + try { + if (startCompleted.await(START_COMPLETED_WAIT_TIME.getSeconds(), TimeUnit.SECONDS) == false) { + throw new IllegalStateException("Amazon Bedrock sender startup did not complete in time"); + } + } catch (InterruptedException e) { + throw new IllegalStateException("Amazon Bedrock sender interrupted while waiting for startup to complete"); + } + } + + @Override + public void send( + RequestManager requestCreator, + InferenceInputs inferenceInputs, + TimeValue timeout, + ActionListener listener + ) { + assert started.get() : "Amazon Bedrock request sender: call start() before sending a request"; + waitForStartToComplete(); + + if (requestCreator instanceof AmazonBedrockRequestManager amazonBedrockRequestManager) { + executorService.execute(amazonBedrockRequestManager, inferenceInputs, timeout, listener); + return; + } + + listener.onFailure(new ElasticsearchException("Amazon Bedrock request sender did not receive a valid request request manager")); + } + + @Override + public void close() throws IOException { + executorService.shutdown(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java new file mode 100644 index 0000000000000..1d8226664979c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionEntityFactory; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion.AmazonBedrockChatCompletionResponseHandler; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; + +import java.util.List; +import java.util.function.Supplier; + +public class AmazonBedrockChatCompletionRequestManager extends AmazonBedrockRequestManager { + private static final Logger logger = LogManager.getLogger(AmazonBedrockChatCompletionRequestManager.class); + private final AmazonBedrockChatCompletionModel model; + + public AmazonBedrockChatCompletionRequestManager( + AmazonBedrockChatCompletionModel model, + ThreadPool threadPool, + @Nullable TimeValue timeout + ) { + super(model, threadPool, timeout); + this.model = model; + } + + @Override + public void execute( + String query, + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + ActionListener listener + ) { + var requestEntity = AmazonBedrockChatCompletionEntityFactory.createEntity(model, input); + var request = new AmazonBedrockChatCompletionRequest(model, requestEntity, timeout); + var responseHandler = new AmazonBedrockChatCompletionResponseHandler(); + + try { + requestSender.send(logger, request, HttpClientContext.create(), hasRequestCompletedFunction, responseHandler, listener); + } catch (Exception e) { + var errorMessage = Strings.format( + "Failed to send [completion] request from inference entity id [%s]", + request.getInferenceEntityId() + ); + logger.warn(errorMessage, e); + listener.onFailure(new ElasticsearchException(errorMessage, e)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java new file mode 100644 index 0000000000000..e9bc6b574865c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsEntityFactory; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings.AmazonBedrockEmbeddingsResponseHandler; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.inference.common.Truncator.truncate; + +public class AmazonBedrockEmbeddingsRequestManager extends AmazonBedrockRequestManager { + private static final Logger logger = LogManager.getLogger(AmazonBedrockEmbeddingsRequestManager.class); + + private final AmazonBedrockEmbeddingsModel embeddingsModel; + private final Truncator truncator; + + public AmazonBedrockEmbeddingsRequestManager( + AmazonBedrockEmbeddingsModel model, + Truncator truncator, + ThreadPool threadPool, + @Nullable TimeValue timeout + ) { + super(model, threadPool, timeout); + this.embeddingsModel = model; + this.truncator = Objects.requireNonNull(truncator); + } + + @Override + public void execute( + String query, + List input, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + ActionListener listener + ) { + var serviceSettings = embeddingsModel.getServiceSettings(); + var truncatedInput = truncate(input, serviceSettings.maxInputTokens()); + var requestEntity = AmazonBedrockEmbeddingsEntityFactory.createEntity(embeddingsModel, truncatedInput); + var responseHandler = new AmazonBedrockEmbeddingsResponseHandler(); + var request = new AmazonBedrockEmbeddingsRequest(truncator, truncatedInput, embeddingsModel, requestEntity, timeout); + try { + requestSender.send(logger, request, HttpClientContext.create(), hasRequestCompletedFunction, responseHandler, listener); + } catch (Exception e) { + var errorMessage = Strings.format( + "Failed to send [text_embedding] request from inference entity id [%s]", + request.getInferenceEntityId() + ); + logger.warn(errorMessage, e); + listener.onFailure(new ElasticsearchException(errorMessage, e)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestExecutorService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestExecutorService.java new file mode 100644 index 0000000000000..8b4672d45c250 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestExecutorService.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockExecuteOnlyRequestSender; + +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +/** + * Allows this to have a public interface for Amazon Bedrock support + */ +public class AmazonBedrockRequestExecutorService extends RequestExecutorService { + + private final AmazonBedrockExecuteOnlyRequestSender requestSender; + + public AmazonBedrockRequestExecutorService( + ThreadPool threadPool, + CountDownLatch startupLatch, + RequestExecutorServiceSettings settings, + AmazonBedrockExecuteOnlyRequestSender requestSender + ) { + super(threadPool, startupLatch, settings, requestSender); + this.requestSender = requestSender; + } + + @Override + public void shutdown() { + super.shutdown(); + try { + requestSender.shutdown(); + } catch (IOException e) { + // swallow the exception + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestManager.java new file mode 100644 index 0000000000000..f75343b038368 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestManager.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.util.Objects; + +public abstract class AmazonBedrockRequestManager implements RequestManager { + + protected final ThreadPool threadPool; + protected final TimeValue timeout; + private final AmazonBedrockModel baseModel; + + protected AmazonBedrockRequestManager(AmazonBedrockModel baseModel, ThreadPool threadPool, @Nullable TimeValue timeout) { + this.baseModel = Objects.requireNonNull(baseModel); + this.threadPool = Objects.requireNonNull(threadPool); + this.timeout = timeout; + } + + @Override + public String inferenceEntityId() { + return baseModel.getInferenceEntityId(); + } + + @Override + public RateLimitSettings rateLimitSettings() { + return baseModel.rateLimitSettings(); + } + + record RateLimitGrouping(int keyHash) { + public static AmazonBedrockRequestManager.RateLimitGrouping of(AmazonBedrockModel model) { + Objects.requireNonNull(model); + + var awsSecretSettings = model.getSecretSettings(); + + return new RateLimitGrouping(Objects.hash(awsSecretSettings.accessKey, awsSecretSettings.secretKey)); + } + } + + @Override + public Object rateLimitGrouping() { + return RateLimitGrouping.of(this.baseModel); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonBuilder.java new file mode 100644 index 0000000000000..829e899beba5e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonBuilder.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.xcontent.ToXContent; + +import java.io.IOException; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; + +public class AmazonBedrockJsonBuilder { + + private final ToXContent jsonWriter; + + public AmazonBedrockJsonBuilder(ToXContent jsonWriter) { + this.jsonWriter = jsonWriter; + } + + public String getStringContent() throws IOException { + try (var builder = jsonBuilder()) { + return Strings.toString(jsonWriter.toXContent(builder, null)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonWriter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonWriter.java new file mode 100644 index 0000000000000..83ebcb4563a8c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonWriter.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock; + +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; + +/** + * This is needed as the input for the Amazon Bedrock SDK does not like + * the formatting of XContent JSON output + */ +public interface AmazonBedrockJsonWriter { + JsonGenerator writeJson(JsonGenerator generator) throws IOException; +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockRequest.java new file mode 100644 index 0000000000000..e356212ed07fb --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockRequest.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockBaseClient; +import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.net.URI; + +public abstract class AmazonBedrockRequest implements Request { + + protected final AmazonBedrockModel amazonBedrockModel; + protected final String inferenceId; + protected final TimeValue timeout; + + protected AmazonBedrockRequest(AmazonBedrockModel model, @Nullable TimeValue timeout) { + this.amazonBedrockModel = model; + this.inferenceId = model.getInferenceEntityId(); + this.timeout = timeout; + } + + protected abstract void executeRequest(AmazonBedrockBaseClient client); + + public AmazonBedrockModel model() { + return amazonBedrockModel; + } + + /** + * Amazon Bedrock uses the AWS SDK, and will not create its own Http Request + * But, this is needed for the ExecutableInferenceRequest to get the inferenceEntityId + * @return NoOp request + */ + @Override + public final HttpRequest createHttpRequest() { + return new HttpRequest(new NoOpHttpRequest(), inferenceId); + } + + /** + * Amazon Bedrock uses the AWS SDK, and will not create its own URI + * @return null + */ + @Override + public final URI getURI() { + throw new UnsupportedOperationException(); + } + + /** + * Should be overridden for text embeddings requests + * @return null + */ + @Override + public Request truncate() { + return this; + } + + /** + * Should be overridden for text embeddings requests + * @return boolean[0] + */ + @Override + public boolean[] getTruncationInfo() { + return new boolean[0]; + } + + @Override + public String getInferenceEntityId() { + return amazonBedrockModel.getInferenceEntityId(); + } + + public TimeValue timeout() { + return timeout; + } + + public abstract TaskType taskType(); +} diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/NoOpHttpRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/NoOpHttpRequest.java new file mode 100644 index 0000000000000..7087bb03bca5e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/NoOpHttpRequest.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock; + +import org.apache.http.client.methods.HttpRequestBase; + +/** + * Needed for compatibility with RequestSender + */ +public class NoOpHttpRequest extends HttpRequestBase { + @Override + public String getMethod() { + return "NOOP"; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntity.java new file mode 100644 index 0000000000000..6e2f2f6702005 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntity.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockAI21LabsCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockAI21LabsCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + return request; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntity.java new file mode 100644 index 0000000000000..a8b0032af09c5 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntity.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockAnthropicCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockAnthropicCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + if (topK == null) { + return request; + } + + String topKField = Strings.format("{\"top_k\":%f}", topK.floatValue()); + return request.withAdditionalModelResponseFieldPaths(topKField); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionEntityFactory.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionEntityFactory.java new file mode 100644 index 0000000000000..f86d2229d42ad --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionEntityFactory.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; + +import java.util.List; +import java.util.Objects; + +public final class AmazonBedrockChatCompletionEntityFactory { + public static AmazonBedrockConverseRequestEntity createEntity(AmazonBedrockChatCompletionModel model, List messages) { + Objects.requireNonNull(model); + Objects.requireNonNull(messages); + var serviceSettings = model.getServiceSettings(); + var taskSettings = model.getTaskSettings(); + switch (serviceSettings.provider()) { + case AI21LABS -> { + return new AmazonBedrockAI21LabsCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.maxNewTokens() + ); + } + case AMAZONTITAN -> { + return new AmazonBedrockTitanCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.maxNewTokens() + ); + } + case ANTHROPIC -> { + return new AmazonBedrockAnthropicCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.topK(), + taskSettings.maxNewTokens() + ); + } + case COHERE -> { + return new AmazonBedrockCohereCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.topK(), + taskSettings.maxNewTokens() + ); + } + case META -> { + return new AmazonBedrockMetaCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.maxNewTokens() + ); + } + case MISTRAL -> { + return new AmazonBedrockMistralCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.topK(), + taskSettings.maxNewTokens() + ); + } + default -> { + return null; + } + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionRequest.java new file mode 100644 index 0000000000000..f02f05f2d3b17 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionRequest.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockBaseClient; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion.AmazonBedrockChatCompletionResponseListener; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; + +import java.io.IOException; +import java.util.Objects; + +public class AmazonBedrockChatCompletionRequest extends AmazonBedrockRequest { + public static final String USER_ROLE = "user"; + private final AmazonBedrockConverseRequestEntity requestEntity; + private AmazonBedrockChatCompletionResponseListener listener; + + public AmazonBedrockChatCompletionRequest( + AmazonBedrockChatCompletionModel model, + AmazonBedrockConverseRequestEntity requestEntity, + @Nullable TimeValue timeout + ) { + super(model, timeout); + this.requestEntity = Objects.requireNonNull(requestEntity); + } + + @Override + protected void executeRequest(AmazonBedrockBaseClient client) { + var converseRequest = getConverseRequest(); + + try { + SocketAccess.doPrivileged(() -> client.converse(converseRequest, listener)); + } catch (IOException e) { + listener.onFailure(new RuntimeException(e)); + } + } + + @Override + public TaskType taskType() { + return TaskType.COMPLETION; + } + + private ConverseRequest getConverseRequest() { + var converseRequest = new ConverseRequest().withModelId(amazonBedrockModel.model()); + converseRequest = requestEntity.addMessages(converseRequest); + converseRequest = requestEntity.addInferenceConfig(converseRequest); + converseRequest = requestEntity.addAdditionalModelFields(converseRequest); + return converseRequest; + } + + public void executeChatCompletionRequest( + AmazonBedrockBaseClient awsBedrockClient, + AmazonBedrockChatCompletionResponseListener chatCompletionResponseListener + ) { + this.listener = chatCompletionResponseListener; + this.executeRequest(awsBedrockClient); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntity.java new file mode 100644 index 0000000000000..17a264ef820ff --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntity.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockCohereCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockCohereCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + if (topK == null) { + return request; + } + + String topKField = Strings.format("{\"top_k\":%f}", topK.floatValue()); + return request.withAdditionalModelResponseFieldPaths(topKField); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestEntity.java new file mode 100644 index 0000000000000..fbd55e76e509b --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestEntity.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion;
+
+import com.amazonaws.services.bedrockruntime.model.ConverseRequest;
+
+public interface AmazonBedrockConverseRequestEntity {
+    ConverseRequest addMessages(ConverseRequest request);
+
+    ConverseRequest addInferenceConfig(ConverseRequest request);
+
+    ConverseRequest addAdditionalModelFields(ConverseRequest request);
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseUtils.java
new file mode 100644
index 0000000000000..2cfb56a94b319
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseUtils.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion;
+
+import com.amazonaws.services.bedrockruntime.model.ContentBlock;
+import com.amazonaws.services.bedrockruntime.model.Message;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest.USER_ROLE;
+
+public final class AmazonBedrockConverseUtils {
+
+    public static List<Message> getConverseMessageList(List<String> messages) {
+        List<Message> messageList = new ArrayList<>();
+        for (String message : messages) {
+            var messageContent = new ContentBlock().withText(message);
+            var returnMessage = (new Message()).withRole(USER_ROLE).withContent(messageContent);
+            messageList.add(returnMessage);
+        }
+        return messageList;
+    }
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntity.java
new file mode 100644
index 0000000000000..cdabdd4cbebff
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntity.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockMetaCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockMetaCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + return request; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntity.java new file mode 100644 index 0000000000000..c68eaa1b81f54 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntity.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockMistralCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockMistralCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + if (topK == null) { + return request; + } + + String topKField = Strings.format("{\"top_k\":%f}", topK.floatValue()); + return request.withAdditionalModelResponseFieldPaths(topKField); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntity.java new file mode 100644 index 0000000000000..d56035b80e9ef --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntity.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockTitanCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockTitanCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + return request; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntity.java new file mode 100644 index 0000000000000..edca5bc1bdf9c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntity.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public record AmazonBedrockCohereEmbeddingsRequestEntity(List input) implements ToXContentObject { + + private static final String TEXTS_FIELD = "texts"; + private static final String INPUT_TYPE_FIELD = "input_type"; + private static final String INPUT_TYPE_SEARCH_DOCUMENT = "search_document"; + + public AmazonBedrockCohereEmbeddingsRequestEntity { + Objects.requireNonNull(input); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TEXTS_FIELD, input); + builder.field(INPUT_TYPE_FIELD, INPUT_TYPE_SEARCH_DOCUMENT); + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsEntityFactory.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsEntityFactory.java new file mode 100644 index 0000000000000..a31b033507264 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsEntityFactory.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; + +import java.util.Objects; + +public final class AmazonBedrockEmbeddingsEntityFactory { + public static ToXContent createEntity(AmazonBedrockEmbeddingsModel model, Truncator.TruncationResult truncationResult) { + Objects.requireNonNull(model); + Objects.requireNonNull(truncationResult); + + var serviceSettings = model.getServiceSettings(); + + var truncatedInput = truncationResult.input(); + if (truncatedInput == null || truncatedInput.isEmpty()) { + throw new ElasticsearchException("[input] cannot be null or empty"); + } + + switch (serviceSettings.provider()) { + case AMAZONTITAN -> { + if (truncatedInput.size() > 1) { + throw new ElasticsearchException("[input] cannot contain more than one string"); + } + return new AmazonBedrockTitanEmbeddingsRequestEntity(truncatedInput.get(0)); + } + case COHERE -> { + return new AmazonBedrockCohereEmbeddingsRequestEntity(truncatedInput); + } + default -> { + return null; + } + } + + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsRequest.java new file mode 100644 index 0000000000000..96d3b3a3cc057 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsRequest.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import com.amazonaws.services.bedrockruntime.model.InvokeModelRequest; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockBaseClient; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockJsonBuilder; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings.AmazonBedrockEmbeddingsResponseListener; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Objects; + +public class AmazonBedrockEmbeddingsRequest extends AmazonBedrockRequest { + private final AmazonBedrockEmbeddingsModel embeddingsModel; + private final ToXContent requestEntity; + private final Truncator truncator; + private final Truncator.TruncationResult truncationResult; + private final AmazonBedrockProvider provider; + private ActionListener listener = null; + + public AmazonBedrockEmbeddingsRequest( + Truncator truncator, + Truncator.TruncationResult input, + AmazonBedrockEmbeddingsModel model, + ToXContent requestEntity, + @Nullable TimeValue timeout + ) { + super(model, timeout); + this.truncator = Objects.requireNonNull(truncator); + this.truncationResult = Objects.requireNonNull(input); + this.requestEntity = Objects.requireNonNull(requestEntity); + this.embeddingsModel = model; + this.provider = model.provider(); + } + + public AmazonBedrockProvider provider() { + return provider; + } + + @Override + protected void executeRequest(AmazonBedrockBaseClient client) { + try { + var jsonBuilder = new AmazonBedrockJsonBuilder(requestEntity); + var bodyAsString = jsonBuilder.getStringContent(); + + var charset = StandardCharsets.UTF_8; + var bodyBuffer = charset.encode(bodyAsString); + + var invokeModelRequest = new InvokeModelRequest().withModelId(embeddingsModel.model()).withBody(bodyBuffer); + + SocketAccess.doPrivileged(() -> client.invokeModel(invokeModelRequest, listener)); + } catch (IOException e) { + listener.onFailure(new RuntimeException(e)); + } + } + + @Override + public Request truncate() { + var truncatedInput = truncator.truncate(truncationResult.input()); + return new AmazonBedrockEmbeddingsRequest(truncator, truncatedInput, embeddingsModel, requestEntity, timeout); + } + + @Override + public boolean[] getTruncationInfo() { + return truncationResult.truncated().clone(); + } + + @Override + public TaskType taskType() { + return TaskType.TEXT_EMBEDDING; + } + + public void executeEmbeddingsRequest( + AmazonBedrockBaseClient awsBedrockClient, + AmazonBedrockEmbeddingsResponseListener embeddingsResponseListener + ) { + this.listener = embeddingsResponseListener; + this.executeRequest(awsBedrockClient); + } +} diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntity.java new file mode 100644 index 0000000000000..f55edd0442913 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntity.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public record AmazonBedrockTitanEmbeddingsRequestEntity(String inputText) implements ToXContentObject { + + private static final String INPUT_TEXT_FIELD = "inputText"; + + public AmazonBedrockTitanEmbeddingsRequestEntity { + Objects.requireNonNull(inputText); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INPUT_TEXT_FIELD, inputText); + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponse.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponse.java new file mode 100644 index 0000000000000..54b05137acda3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponse.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock; + +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; + +public abstract class AmazonBedrockResponse { + public abstract InferenceServiceResults accept(AmazonBedrockRequest request); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseHandler.java new file mode 100644 index 0000000000000..9dc15ea667c1d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseHandler.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.inference.external.response.amazonbedrock;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.xpack.inference.external.http.HttpResult;
+import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler;
+import org.elasticsearch.xpack.inference.external.http.retry.RetryException;
+import org.elasticsearch.xpack.inference.external.request.Request;
+import org.elasticsearch.xpack.inference.logging.ThrottlerManager;
+
+public abstract class AmazonBedrockResponseHandler implements ResponseHandler {
+    @Override
+    public final void validateResponse(ThrottlerManager throttlerManager, Logger logger, Request request, HttpResult result)
+        throws RetryException {
+        // do nothing as the AWS SDK will take care of validation for us
+    }
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseListener.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseListener.java
new file mode 100644
index 0000000000000..ce4d6d1dea655
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseListener.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.external.response.amazonbedrock;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.inference.InferenceServiceResults;
+import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest;
+
+import java.util.Objects;
+
+public class AmazonBedrockResponseListener {
+    protected final AmazonBedrockRequest request;
+    protected final ActionListener<InferenceServiceResults> inferenceResultsListener;
+    protected final AmazonBedrockResponseHandler responseHandler;
+
+    public AmazonBedrockResponseListener(
+        AmazonBedrockRequest request,
+        AmazonBedrockResponseHandler responseHandler,
+        ActionListener<InferenceServiceResults> inferenceResultsListener
+    ) {
+        this.request = Objects.requireNonNull(request);
+        this.responseHandler = Objects.requireNonNull(responseHandler);
+        this.inferenceResultsListener = Objects.requireNonNull(inferenceResultsListener);
+    }
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponse.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponse.java
new file mode 100644
index 0000000000000..5b3872e2c416a
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponse.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion;
+
+import com.amazonaws.services.bedrockruntime.model.ConverseResult;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.inference.InferenceServiceResults;
+import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults;
+import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest;
+import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest;
+import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponse;
+
+import java.util.ArrayList;
+
+public class AmazonBedrockChatCompletionResponse extends AmazonBedrockResponse {
+
+    private final ConverseResult result;
+
+    public AmazonBedrockChatCompletionResponse(ConverseResult responseResult) {
+        this.result = responseResult;
+    }
+
+    @Override
+    public InferenceServiceResults accept(AmazonBedrockRequest request) {
+        if (request instanceof AmazonBedrockChatCompletionRequest asChatCompletionRequest) {
+            return fromResponse(result);
+        }
+
+        throw new ElasticsearchException("unexpected request type [" + request.getClass() + "]");
+    }
+
+    public static ChatCompletionResults fromResponse(ConverseResult response) {
+        var responseMessage = response.getOutput().getMessage();
+
+        var messageContents = responseMessage.getContent();
+        var resultTexts = new ArrayList<ChatCompletionResults.Result>();
+        for (var messageContent : messageContents) {
+            resultTexts.add(new ChatCompletionResults.Result(messageContent.getText()));
+        }
+
+        return new ChatCompletionResults(resultTexts);
+    }
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseHandler.java
new file mode 100644
index 0000000000000..a24f54c50eef3
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseHandler.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion;
+
+import com.amazonaws.services.bedrockruntime.model.ConverseResult;
+
+import org.elasticsearch.inference.InferenceServiceResults;
+import org.elasticsearch.xpack.inference.external.http.HttpResult;
+import org.elasticsearch.xpack.inference.external.http.retry.RetryException;
+import org.elasticsearch.xpack.inference.external.request.Request;
+import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest;
+import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler;
+
+public class AmazonBedrockChatCompletionResponseHandler extends AmazonBedrockResponseHandler {
+
+    private ConverseResult responseResult;
+
+    public AmazonBedrockChatCompletionResponseHandler() {}
+
+    @Override
+    public InferenceServiceResults parseResult(Request request, HttpResult result) throws RetryException {
+        var response = new AmazonBedrockChatCompletionResponse(responseResult);
+        return response.accept((AmazonBedrockRequest) request);
+    }
+
+    @Override
+    public String getRequestType() {
+        return "Amazon Bedrock Chat Completion";
+    }
+
+    public void acceptChatCompletionResponseObject(ConverseResult response) {
+        this.responseResult = response;
+    }
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseListener.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseListener.java
new file mode 100644
index 0000000000000..be03ba84571eb
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseListener.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion;
+
+import com.amazonaws.services.bedrockruntime.model.ConverseResult;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.inference.InferenceServiceResults;
+import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest;
+import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler;
+import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseListener;
+
+public class AmazonBedrockChatCompletionResponseListener extends AmazonBedrockResponseListener implements ActionListener<ConverseResult> {
+
+    public AmazonBedrockChatCompletionResponseListener(
+        AmazonBedrockChatCompletionRequest request,
+        AmazonBedrockResponseHandler responseHandler,
+        ActionListener<InferenceServiceResults> inferenceResultsListener
+    ) {
+        super(request, responseHandler, inferenceResultsListener);
+    }
+
+    @Override
+    public void onResponse(ConverseResult result) {
+        ((AmazonBedrockChatCompletionResponseHandler) responseHandler).acceptChatCompletionResponseObject(result);
+        inferenceResultsListener.onResponse(responseHandler.parseResult(request, null));
+    }
+
+    @Override
+    public void onFailure(Exception e) {
+        throw new ElasticsearchException(e);
+    }
+
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponse.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponse.java
new file mode 100644
index 0000000000000..83fa790acbe68
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponse.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings; + +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.XContentUtils; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponse; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseList; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.positionParserAtTokenAfterField; + +public class AmazonBedrockEmbeddingsResponse extends AmazonBedrockResponse { + private static final String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in Amazon Bedrock embeddings response"; + private final InvokeModelResult result; + + public AmazonBedrockEmbeddingsResponse(InvokeModelResult invokeModelResult) { + this.result = invokeModelResult; + } + + @Override + public InferenceServiceResults accept(AmazonBedrockRequest request) { + if (request instanceof AmazonBedrockEmbeddingsRequest asEmbeddingsRequest) { + return fromResponse(result, asEmbeddingsRequest.provider()); + } + + throw new ElasticsearchException("unexpected request type [" + request.getClass() + "]"); + } + + public static InferenceTextEmbeddingFloatResults fromResponse(InvokeModelResult response, AmazonBedrockProvider provider) { + var charset = StandardCharsets.UTF_8; + var bodyText = String.valueOf(charset.decode(response.getBody())); + + var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); + + try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, bodyText)) { + // move to the first token + jsonParser.nextToken(); + + XContentParser.Token token = jsonParser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser); + + var embeddingList = parseEmbeddings(jsonParser, provider); + + return new InferenceTextEmbeddingFloatResults(embeddingList); + } catch (IOException e) { + throw new ElasticsearchException(e); + } + } + + private static List parseEmbeddings( + XContentParser jsonParser, + AmazonBedrockProvider provider + ) throws IOException { + switch (provider) { + case AMAZONTITAN -> { + return parseTitanEmbeddings(jsonParser); + } + case COHERE -> { + return parseCohereEmbeddings(jsonParser); + } + default -> throw new IOException("Unsupported provider [" + provider + "]"); + } + } + + private static List parseTitanEmbeddings(XContentParser parser) + throws 
IOException { + /* + Titan response: + { + "embedding": [float, float, ...], + "inputTextTokenCount": int + } + */ + positionParserAtTokenAfterField(parser, "embedding", FAILED_TO_FIND_FIELD_TEMPLATE); + List embeddingValuesList = parseList(parser, XContentUtils::parseFloat); + var embeddingValues = InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding.of(embeddingValuesList); + return List.of(embeddingValues); + } + + private static List parseCohereEmbeddings(XContentParser parser) + throws IOException { + /* + Cohere response: + { + "embeddings": [ + [< array of 1024 floats >], + ... + ], + "id": string, + "response_type" : "embeddings_floats", + "texts": [string] + } + */ + positionParserAtTokenAfterField(parser, "embeddings", FAILED_TO_FIND_FIELD_TEMPLATE); + + List embeddingList = parseList( + parser, + AmazonBedrockEmbeddingsResponse::parseCohereEmbeddingsListItem + ); + + return embeddingList; + } + + private static InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding parseCohereEmbeddingsListItem(XContentParser parser) + throws IOException { + List embeddingValuesList = parseList(parser, XContentUtils::parseFloat); + return InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding.of(embeddingValuesList); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseHandler.java new file mode 100644 index 0000000000000..a3fb68ee23486 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseHandler.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings;
+
+import com.amazonaws.services.bedrockruntime.model.InvokeModelResult;
+
+import org.elasticsearch.inference.InferenceServiceResults;
+import org.elasticsearch.xpack.inference.external.http.HttpResult;
+import org.elasticsearch.xpack.inference.external.http.retry.RetryException;
+import org.elasticsearch.xpack.inference.external.request.Request;
+import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest;
+import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler;
+
+public class AmazonBedrockEmbeddingsResponseHandler extends AmazonBedrockResponseHandler {
+
+    private InvokeModelResult invokeModelResult;
+
+    @Override
+    public InferenceServiceResults parseResult(Request request, HttpResult result) throws RetryException {
+        var responseParser = new AmazonBedrockEmbeddingsResponse(invokeModelResult);
+        return responseParser.accept((AmazonBedrockRequest) request);
+    }
+
+    @Override
+    public String getRequestType() {
+        return "Amazon Bedrock Embeddings";
+    }
+
+    public void acceptEmbeddingsResult(InvokeModelResult result) {
+        this.invokeModelResult = result;
+    }
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseListener.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseListener.java
new file mode 100644
index 0000000000000..36519ae31ff60
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseListener.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings; + +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseListener; + +public class AmazonBedrockEmbeddingsResponseListener extends AmazonBedrockResponseListener implements ActionListener { + + public AmazonBedrockEmbeddingsResponseListener( + AmazonBedrockEmbeddingsRequest request, + AmazonBedrockResponseHandler responseHandler, + ActionListener inferenceResultsListener + ) { + super(request, responseHandler, inferenceResultsListener); + } + + @Override + public void onResponse(InvokeModelResult result) { + ((AmazonBedrockEmbeddingsResponseHandler) responseHandler).acceptEmbeddingsResult(result); + inferenceResultsListener.onResponse(responseHandler.parseResult(request, null)); + } + + @Override + public void onFailure(Exception e) { + inferenceResultsListener.onFailure(e); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockConstants.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockConstants.java new file mode 100644 index 0000000000000..1755dac2ac13f --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockConstants.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +public class AmazonBedrockConstants { + public static final String ACCESS_KEY_FIELD = "access_key"; + public static final String SECRET_KEY_FIELD = "secret_key"; + public static final String REGION_FIELD = "region"; + public static final String MODEL_FIELD = "model"; + public static final String PROVIDER_FIELD = "provider"; + + public static final String TEMPERATURE_FIELD = "temperature"; + public static final String TOP_P_FIELD = "top_p"; + public static final String TOP_K_FIELD = "top_k"; + public static final String MAX_NEW_TOKENS_FIELD = "max_new_tokens"; + + public static final Double MIN_TEMPERATURE_TOP_P_TOP_K_VALUE = 0.0; + public static final Double MAX_TEMPERATURE_TOP_P_TOP_K_VALUE = 1.0; + + public static final int DEFAULT_MAX_CHUNK_SIZE = 2048; + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockModel.java new file mode 100644 index 0000000000000..13ca8bd7bd749 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockModel.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.amazonbedrock.AmazonBedrockActionVisitor; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.util.Map; + +public abstract class AmazonBedrockModel extends Model { + + protected String region; + protected String model; + protected AmazonBedrockProvider provider; + protected RateLimitSettings rateLimitSettings; + + protected AmazonBedrockModel(ModelConfigurations modelConfigurations, ModelSecrets secrets) { + super(modelConfigurations, secrets); + setPropertiesFromServiceSettings((AmazonBedrockServiceSettings) modelConfigurations.getServiceSettings()); + } + + protected AmazonBedrockModel(Model model, TaskSettings taskSettings) { + super(model, taskSettings); + + if (model instanceof AmazonBedrockModel bedrockModel) { + setPropertiesFromServiceSettings(bedrockModel.getServiceSettings()); + } + } + + protected AmazonBedrockModel(Model model, ServiceSettings serviceSettings) { + super(model, serviceSettings); + if (serviceSettings instanceof AmazonBedrockServiceSettings bedrockServiceSettings) { + setPropertiesFromServiceSettings(bedrockServiceSettings); + } + } + + protected AmazonBedrockModel(ModelConfigurations modelConfigurations) { + super(modelConfigurations); + setPropertiesFromServiceSettings((AmazonBedrockServiceSettings) modelConfigurations.getServiceSettings()); + } + + public String region() { + return region; + } + + public String model() { + return model; + } + + public AmazonBedrockProvider provider() { + return provider; + } + + public RateLimitSettings rateLimitSettings() { + return rateLimitSettings; + } + + private void setPropertiesFromServiceSettings(AmazonBedrockServiceSettings serviceSettings) { + this.region = serviceSettings.region(); + this.model = serviceSettings.model(); + this.provider = serviceSettings.provider(); + this.rateLimitSettings = serviceSettings.rateLimitSettings(); + } + + public abstract ExecutableAction accept(AmazonBedrockActionVisitor creator, Map taskSettings); + + @Override + public AmazonBedrockServiceSettings getServiceSettings() { + return (AmazonBedrockServiceSettings) super.getServiceSettings(); + } + + @Override + public AmazonBedrockSecretSettings getSecretSettings() { + return (AmazonBedrockSecretSettings) super.getSecretSettings(); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProvider.java new file mode 100644 index 0000000000000..340a5a65f0969 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProvider.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import java.util.Locale; + +public enum AmazonBedrockProvider { + AMAZONTITAN, + ANTHROPIC, + AI21LABS, + COHERE, + META, + MISTRAL; + + public static String NAME = "amazon_bedrock_provider"; + + public static AmazonBedrockProvider fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProviderCapabilities.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProviderCapabilities.java new file mode 100644 index 0000000000000..28b10ef294bda --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProviderCapabilities.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.DEFAULT_MAX_CHUNK_SIZE; + +public final class AmazonBedrockProviderCapabilities { + private static final List embeddingProviders = List.of( + AmazonBedrockProvider.AMAZONTITAN, + AmazonBedrockProvider.COHERE + ); + + private static final List chatCompletionProviders = List.of( + AmazonBedrockProvider.AMAZONTITAN, + AmazonBedrockProvider.ANTHROPIC, + AmazonBedrockProvider.AI21LABS, + AmazonBedrockProvider.COHERE, + AmazonBedrockProvider.META, + AmazonBedrockProvider.MISTRAL + ); + + private static final List chatCompletionProvidersWithTopK = List.of( + AmazonBedrockProvider.ANTHROPIC, + AmazonBedrockProvider.COHERE, + AmazonBedrockProvider.MISTRAL + ); + + private static final Map embeddingsDefaultSimilarityMeasure = Map.of( + AmazonBedrockProvider.AMAZONTITAN, + SimilarityMeasure.COSINE, + AmazonBedrockProvider.COHERE, + SimilarityMeasure.DOT_PRODUCT + ); + + private static final Map embeddingsDefaultChunkSize = Map.of( + AmazonBedrockProvider.AMAZONTITAN, + 8192, + AmazonBedrockProvider.COHERE, + 2048 + ); + + private static final Map embeddingsMaxBatchSize = Map.of( + AmazonBedrockProvider.AMAZONTITAN, + 1, + AmazonBedrockProvider.COHERE, + 96 + ); + + public static boolean providerAllowsTaskType(AmazonBedrockProvider provider, TaskType taskType) { + switch (taskType) { + case COMPLETION -> { + return chatCompletionProviders.contains(provider); + } + case TEXT_EMBEDDING -> { + return embeddingProviders.contains(provider); + } + default -> { + return false; + } + } + } + + public static boolean chatCompletionProviderHasTopKParameter(AmazonBedrockProvider provider) { + return chatCompletionProvidersWithTopK.contains(provider); + } + + public static SimilarityMeasure getProviderDefaultSimilarityMeasure(AmazonBedrockProvider provider) { + if (embeddingsDefaultSimilarityMeasure.containsKey(provider)) { + return 
embeddingsDefaultSimilarityMeasure.get(provider); + } + + return SimilarityMeasure.COSINE; + } + + public static int getEmbeddingsProviderDefaultChunkSize(AmazonBedrockProvider provider) { + if (embeddingsDefaultChunkSize.containsKey(provider)) { + return embeddingsDefaultChunkSize.get(provider); + } + + return DEFAULT_MAX_CHUNK_SIZE; + } + + public static int getEmbeddingsMaxBatchSize(AmazonBedrockProvider provider) { + if (embeddingsMaxBatchSize.containsKey(provider)) { + return embeddingsMaxBatchSize.get(provider); + } + + return 1; + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java new file mode 100644 index 0000000000000..9e6328ce1c358 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SecretSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.TransportVersions.ML_INFERENCE_AMAZON_BEDROCK_ADDED; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredSecureString; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.ACCESS_KEY_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.SECRET_KEY_FIELD; + +public class AmazonBedrockSecretSettings implements SecretSettings { + public static final String NAME = "amazon_bedrock_secret_settings"; + + public final SecureString accessKey; + public final SecureString secretKey; + + public static AmazonBedrockSecretSettings fromMap(@Nullable Map map) { + if (map == null) { + return null; + } + + ValidationException validationException = new ValidationException(); + SecureString secureAccessKey = extractRequiredSecureString( + map, + ACCESS_KEY_FIELD, + ModelSecrets.SECRET_SETTINGS, + validationException + ); + SecureString secureSecretKey = extractRequiredSecureString( + map, + SECRET_KEY_FIELD, + ModelSecrets.SECRET_SETTINGS, + validationException + ); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new AmazonBedrockSecretSettings(secureAccessKey, secureSecretKey); + } + + public AmazonBedrockSecretSettings(SecureString accessKey, SecureString secretKey) { + this.accessKey = Objects.requireNonNull(accessKey); + this.secretKey = Objects.requireNonNull(secretKey); + } + + public AmazonBedrockSecretSettings(StreamInput in) throws IOException { + this.accessKey = in.readSecureString(); + 
this.secretKey = in.readSecureString(); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return ML_INFERENCE_AMAZON_BEDROCK_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeSecureString(accessKey); + out.writeSecureString(secretKey); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.field(ACCESS_KEY_FIELD, accessKey.toString()); + builder.field(SECRET_KEY_FIELD, secretKey.toString()); + + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + AmazonBedrockSecretSettings that = (AmazonBedrockSecretSettings) object; + return Objects.equals(accessKey, that.accessKey) && Objects.equals(secretKey, that.secretKey); + } + + @Override + public int hashCode() { + return Objects.hash(accessKey, secretKey); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java new file mode 100644 index 0000000000000..dadcc8a40245e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java @@ -0,0 +1,350 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
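+// Note on the secret settings above: they serialize to a two-field object of the form
+// {"access_key": "<aws access key>", "secret_key": "<aws secret key>"}, and both fields are required when
+// parsing a request; a missing field is reported through the accumulated ValidationException in fromMap.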
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; +import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; +import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; +import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.external.action.amazonbedrock.AmazonBedrockActionCreator; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.SenderService; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.TransportVersions.ML_INFERENCE_AMAZON_BEDROCK_ADDED; +import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.chatCompletionProviderHasTopKParameter; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.getEmbeddingsMaxBatchSize; +import static 
org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.getProviderDefaultSimilarityMeasure; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.providerAllowsTaskType; + +public class AmazonBedrockService extends SenderService { + public static final String NAME = "amazonbedrock"; + + private final Sender amazonBedrockSender; + + public AmazonBedrockService( + HttpRequestSender.Factory httpSenderFactory, + AmazonBedrockRequestSender.Factory amazonBedrockFactory, + ServiceComponents serviceComponents + ) { + super(httpSenderFactory, serviceComponents); + this.amazonBedrockSender = amazonBedrockFactory.createSender(); + } + + @Override + protected void doInfer( + Model model, + List input, + Map taskSettings, + InputType inputType, + TimeValue timeout, + ActionListener listener + ) { + var actionCreator = new AmazonBedrockActionCreator(amazonBedrockSender, this.getServiceComponents(), timeout); + if (model instanceof AmazonBedrockModel baseAmazonBedrockModel) { + var action = baseAmazonBedrockModel.accept(actionCreator, taskSettings); + action.execute(new DocumentsOnlyInput(input), timeout, listener); + } else { + listener.onFailure(createInvalidModelException(model)); + } + } + + @Override + protected void doInfer( + Model model, + String query, + List input, + Map taskSettings, + InputType inputType, + TimeValue timeout, + ActionListener listener + ) { + throw new UnsupportedOperationException("Amazon Bedrock service does not support inference with query input"); + } + + @Override + protected void doChunkedInfer( + Model model, + String query, + List input, + Map taskSettings, + InputType inputType, + ChunkingOptions chunkingOptions, + TimeValue timeout, + ActionListener> listener + ) { + ActionListener inferListener = listener.delegateFailureAndWrap( + (delegate, response) -> delegate.onResponse(translateToChunkedResults(input, response)) + ); + + var actionCreator = new AmazonBedrockActionCreator(amazonBedrockSender, this.getServiceComponents(), timeout); + if (model instanceof AmazonBedrockModel baseAmazonBedrockModel) { + var maxBatchSize = getEmbeddingsMaxBatchSize(baseAmazonBedrockModel.provider()); + var batchedRequests = new EmbeddingRequestChunker(input, maxBatchSize, EmbeddingRequestChunker.EmbeddingType.FLOAT) + .batchRequestsWithListeners(listener); + for (var request : batchedRequests) { + var action = baseAmazonBedrockModel.accept(actionCreator, taskSettings); + action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, inferListener); + } + } else { + listener.onFailure(createInvalidModelException(model)); + } + } + + private static List translateToChunkedResults( + List inputs, + InferenceServiceResults inferenceResults + ) { + if (inferenceResults instanceof InferenceTextEmbeddingFloatResults textEmbeddingResults) { + return InferenceChunkedTextEmbeddingFloatResults.listOf(inputs, textEmbeddingResults); + } else if (inferenceResults instanceof ErrorInferenceResults error) { + return List.of(new ErrorChunkedInferenceResults(error.getException())); + } else { + throw createInvalidChunkedResultException(InferenceTextEmbeddingFloatResults.NAME, inferenceResults.getWriteableName()); + } + } + + @Override + public String name() { + return NAME; + } + + @Override + public void parseRequestConfig( + String modelId, + TaskType taskType, + Map config, + Set platformArchitectures, + ActionListener parsedModelListener + ) { + try { + Map serviceSettingsMap = 
removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); + + AmazonBedrockModel model = createModel( + modelId, + taskType, + serviceSettingsMap, + taskSettingsMap, + serviceSettingsMap, + TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), + ConfigurationParseContext.REQUEST + ); + + throwIfNotEmptyMap(config, NAME); + throwIfNotEmptyMap(serviceSettingsMap, NAME); + throwIfNotEmptyMap(taskSettingsMap, NAME); + + parsedModelListener.onResponse(model); + } catch (Exception e) { + parsedModelListener.onFailure(e); + } + } + + @Override + public Model parsePersistedConfigWithSecrets( + String modelId, + TaskType taskType, + Map config, + Map secrets + ) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + Map secretSettingsMap = removeFromMapOrDefaultEmpty(secrets, ModelSecrets.SECRET_SETTINGS); + + return createModel( + modelId, + taskType, + serviceSettingsMap, + taskSettingsMap, + secretSettingsMap, + parsePersistedConfigErrorMsg(modelId, NAME), + ConfigurationParseContext.PERSISTENT + ); + } + + @Override + public Model parsePersistedConfig(String modelId, TaskType taskType, Map config) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); + + return createModel( + modelId, + taskType, + serviceSettingsMap, + taskSettingsMap, + null, + parsePersistedConfigErrorMsg(modelId, NAME), + ConfigurationParseContext.PERSISTENT + ); + } + + private static AmazonBedrockModel createModel( + String inferenceEntityId, + TaskType taskType, + Map serviceSettings, + Map taskSettings, + @Nullable Map secretSettings, + String failureMessage, + ConfigurationParseContext context + ) { + switch (taskType) { + case TEXT_EMBEDDING -> { + var model = new AmazonBedrockEmbeddingsModel( + inferenceEntityId, + taskType, + NAME, + serviceSettings, + taskSettings, + secretSettings, + context + ); + checkProviderForTask(TaskType.TEXT_EMBEDDING, model.provider()); + return model; + } + case COMPLETION -> { + var model = new AmazonBedrockChatCompletionModel( + inferenceEntityId, + taskType, + NAME, + serviceSettings, + taskSettings, + secretSettings, + context + ); + checkProviderForTask(TaskType.COMPLETION, model.provider()); + checkChatCompletionProviderForTopKParameter(model); + return model; + } + default -> throw new ElasticsearchStatusException(failureMessage, RestStatus.BAD_REQUEST); + } + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return ML_INFERENCE_AMAZON_BEDROCK_ADDED; + } + + /** + * For text embedding models get the embedding size and + * update the service settings. 
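+ * <p>
+ * The size is discovered by running a test inference request through {@code ServiceUtils.getEmbeddingSize}. If the
+ * user explicitly set {@code dimensions} and the returned embedding size differs, the configuration is rejected
+ * with a 400 error; otherwise the discovered size, and the provider's default similarity measure when none was
+ * configured, are written back into the service settings.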
+ * + * @param model The new model + * @param listener The listener + */ + @Override + public void checkModelConfig(Model model, ActionListener listener) { + if (model instanceof AmazonBedrockEmbeddingsModel embeddingsModel) { + ServiceUtils.getEmbeddingSize( + model, + this, + listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) + ); + } else { + listener.onResponse(model); + } + } + + private AmazonBedrockEmbeddingsModel updateModelWithEmbeddingDetails(AmazonBedrockEmbeddingsModel model, int embeddingSize) { + AmazonBedrockEmbeddingsServiceSettings serviceSettings = model.getServiceSettings(); + if (serviceSettings.dimensionsSetByUser() + && serviceSettings.dimensions() != null + && serviceSettings.dimensions() != embeddingSize) { + throw new ElasticsearchStatusException( + Strings.format( + "The retrieved embeddings size [%s] does not match the size specified in the settings [%s]. " + + "Please recreate the [%s] configuration with the correct dimensions", + embeddingSize, + serviceSettings.dimensions(), + model.getConfigurations().getInferenceEntityId() + ), + RestStatus.BAD_REQUEST + ); + } + + var similarityFromModel = serviceSettings.similarity(); + var similarityToUse = similarityFromModel == null ? getProviderDefaultSimilarityMeasure(model.provider()) : similarityFromModel; + + AmazonBedrockEmbeddingsServiceSettings settingsToUse = new AmazonBedrockEmbeddingsServiceSettings( + serviceSettings.region(), + serviceSettings.model(), + serviceSettings.provider(), + embeddingSize, + serviceSettings.dimensionsSetByUser(), + serviceSettings.maxInputTokens(), + similarityToUse, + serviceSettings.rateLimitSettings() + ); + + return new AmazonBedrockEmbeddingsModel(model, settingsToUse); + } + + private static void checkProviderForTask(TaskType taskType, AmazonBedrockProvider provider) { + if (providerAllowsTaskType(provider, taskType) == false) { + throw new ElasticsearchStatusException( + Strings.format("The [%s] task type for provider [%s] is not available", taskType, provider), + RestStatus.BAD_REQUEST + ); + } + } + + private static void checkChatCompletionProviderForTopKParameter(AmazonBedrockChatCompletionModel model) { + var taskSettings = model.getTaskSettings(); + if (taskSettings.topK() != null) { + if (chatCompletionProviderHasTopKParameter(model.provider()) == false) { + throw new ElasticsearchStatusException( + Strings.format("The [%s] task parameter is not available for provider [%s]", TOP_K_FIELD, model.provider()), + RestStatus.BAD_REQUEST + ); + } + } + } + + @Override + public void close() throws IOException { + super.close(); + IOUtils.closeWhileHandlingException(amazonBedrockSender); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceSettings.java new file mode 100644 index 0000000000000..13c7c0a8c5938 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceSettings.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
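+// Illustrative sketch only (values are placeholders, not part of this change): the maps handed to
+// parseRequestConfig(...)/createModel(...) in AmazonBedrockService above carry the fields defined in
+// AmazonBedrockConstants. For a completion endpoint that would look roughly like:
+//
+//     Map<String, Object> serviceSettings = new HashMap<>(
+//         Map.of("region", "us-east-1", "model", "<bedrock model id>", "provider", "anthropic",
+//                "access_key", "<aws access key>", "secret_key", "<aws secret key>"));
+//     Map<String, Object> taskSettings = new HashMap<>(Map.of("temperature", 0.7, "max_new_tokens", 128));
+//
+// Mutable maps are used because the parsing helpers remove entries as they validate them and reject any
+// keys left over afterwards (throwIfNotEmptyMap).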
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.settings.FilteredXContentObject; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.TransportVersions.ML_INFERENCE_AMAZON_BEDROCK_ADDED; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredEnum; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MODEL_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.PROVIDER_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.REGION_FIELD; + +public abstract class AmazonBedrockServiceSettings extends FilteredXContentObject implements ServiceSettings { + + protected static final String AMAZON_BEDROCK_BASE_NAME = "amazon_bedrock"; + + protected final String region; + protected final String model; + protected final AmazonBedrockProvider provider; + protected final RateLimitSettings rateLimitSettings; + + // the default requests per minute are defined as per-model in the "Runtime quotas" on AWS + // see: https://docs.aws.amazon.com/bedrock/latest/userguide/quotas.html + // setting this to 240 requests per minute (4 requests / sec) is a sane default for us as it should be enough for + // decent throughput without exceeding the minimal for _most_ items. The user should consult + // the table above if using a model that might have a lesser limit (e.g. 
Anthropic Claude 3.5) + protected static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(240); + + protected static AmazonBedrockServiceSettings.BaseAmazonBedrockCommonSettings fromMap( + Map map, + ValidationException validationException, + ConfigurationParseContext context + ) { + String model = extractRequiredString(map, MODEL_FIELD, ModelConfigurations.SERVICE_SETTINGS, validationException); + String region = extractRequiredString(map, REGION_FIELD, ModelConfigurations.SERVICE_SETTINGS, validationException); + AmazonBedrockProvider provider = extractRequiredEnum( + map, + PROVIDER_FIELD, + ModelConfigurations.SERVICE_SETTINGS, + AmazonBedrockProvider::fromString, + EnumSet.allOf(AmazonBedrockProvider.class), + validationException + ); + RateLimitSettings rateLimitSettings = RateLimitSettings.of( + map, + DEFAULT_RATE_LIMIT_SETTINGS, + validationException, + AMAZON_BEDROCK_BASE_NAME, + context + ); + + return new BaseAmazonBedrockCommonSettings(region, model, provider, rateLimitSettings); + } + + protected record BaseAmazonBedrockCommonSettings( + String region, + String model, + AmazonBedrockProvider provider, + @Nullable RateLimitSettings rateLimitSettings + ) {} + + protected AmazonBedrockServiceSettings(StreamInput in) throws IOException { + this.region = in.readString(); + this.model = in.readString(); + this.provider = in.readEnum(AmazonBedrockProvider.class); + this.rateLimitSettings = new RateLimitSettings(in); + } + + protected AmazonBedrockServiceSettings( + String region, + String model, + AmazonBedrockProvider provider, + @Nullable RateLimitSettings rateLimitSettings + ) { + this.region = Objects.requireNonNull(region); + this.model = Objects.requireNonNull(model); + this.provider = Objects.requireNonNull(provider); + this.rateLimitSettings = Objects.requireNonNullElse(rateLimitSettings, DEFAULT_RATE_LIMIT_SETTINGS); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return ML_INFERENCE_AMAZON_BEDROCK_ADDED; + } + + public String region() { + return region; + } + + public String model() { + return model; + } + + public AmazonBedrockProvider provider() { + return provider; + } + + public RateLimitSettings rateLimitSettings() { + return rateLimitSettings; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(region); + out.writeString(model); + out.writeEnum(provider); + rateLimitSettings.writeTo(out); + } + + public void addBaseXContent(XContentBuilder builder, Params params) throws IOException { + toXContentFragmentOfExposedFields(builder, params); + } + + protected void addXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException { + builder.field(REGION_FIELD, region); + builder.field(MODEL_FIELD, model); + builder.field(PROVIDER_FIELD, provider.name()); + rateLimitSettings.toXContent(builder, params); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java new file mode 100644 index 0000000000000..27dc607d671aa --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.amazonbedrock.AmazonBedrockActionVisitor; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; + +import java.util.Map; + +public class AmazonBedrockChatCompletionModel extends AmazonBedrockModel { + + public static AmazonBedrockChatCompletionModel of(AmazonBedrockChatCompletionModel completionModel, Map taskSettings) { + if (taskSettings == null || taskSettings.isEmpty()) { + return completionModel; + } + + var requestTaskSettings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(taskSettings); + var taskSettingsToUse = AmazonBedrockChatCompletionTaskSettings.of(completionModel.getTaskSettings(), requestTaskSettings); + return new AmazonBedrockChatCompletionModel(completionModel, taskSettingsToUse); + } + + public AmazonBedrockChatCompletionModel( + String inferenceEntityId, + TaskType taskType, + String name, + Map serviceSettings, + Map taskSettings, + Map secretSettings, + ConfigurationParseContext context + ) { + this( + inferenceEntityId, + taskType, + name, + AmazonBedrockChatCompletionServiceSettings.fromMap(serviceSettings, context), + AmazonBedrockChatCompletionTaskSettings.fromMap(taskSettings), + AmazonBedrockSecretSettings.fromMap(secretSettings) + ); + } + + public AmazonBedrockChatCompletionModel( + String inferenceEntityId, + TaskType taskType, + String service, + AmazonBedrockChatCompletionServiceSettings serviceSettings, + AmazonBedrockChatCompletionTaskSettings taskSettings, + AmazonBedrockSecretSettings secrets + ) { + super(new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings), new ModelSecrets(secrets)); + } + + public AmazonBedrockChatCompletionModel(Model model, TaskSettings taskSettings) { + super(model, taskSettings); + } + + @Override + public ExecutableAction accept(AmazonBedrockActionVisitor creator, Map taskSettings) { + return creator.create(this, taskSettings); + } + + @Override + public AmazonBedrockChatCompletionServiceSettings getServiceSettings() { + return (AmazonBedrockChatCompletionServiceSettings) super.getServiceSettings(); + } + + @Override + public AmazonBedrockChatCompletionTaskSettings getTaskSettings() { + return (AmazonBedrockChatCompletionTaskSettings) super.getTaskSettings(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettings.java new file mode 100644 index 0000000000000..5985dcd56c5d2 --- /dev/null +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettings.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; + +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalDoubleInRange; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_NEW_TOKENS_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_TEMPERATURE_TOP_P_TOP_K_VALUE; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MIN_TEMPERATURE_TOP_P_TOP_K_VALUE; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_P_FIELD; + +public record AmazonBedrockChatCompletionRequestTaskSettings( + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxNewTokens +) { + + public static final AmazonBedrockChatCompletionRequestTaskSettings EMPTY_SETTINGS = new AmazonBedrockChatCompletionRequestTaskSettings( + null, + null, + null, + null + ); + + /** + * Extracts the task settings from a map. All settings are considered optional and the absence of a setting + * does not throw an error. 
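+ * <p>
+ * Values that are present are range checked: {@code temperature}, {@code top_p} and {@code top_k} must lie in
+ * [0.0, 1.0] and {@code max_new_tokens} must be a positive integer; any violations are collected and thrown as a
+ * single {@link ValidationException}.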
+ * + * @param map the settings received from a request + * @return a {@link AmazonBedrockChatCompletionRequestTaskSettings} + */ + public static AmazonBedrockChatCompletionRequestTaskSettings fromMap(Map map) { + if (map.isEmpty()) { + return AmazonBedrockChatCompletionRequestTaskSettings.EMPTY_SETTINGS; + } + + ValidationException validationException = new ValidationException(); + + var temperature = extractOptionalDoubleInRange( + map, + TEMPERATURE_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + var topP = extractOptionalDoubleInRange( + map, + TOP_P_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + var topK = extractOptionalDoubleInRange( + map, + TOP_K_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + Integer maxNewTokens = extractOptionalPositiveInteger( + map, + MAX_NEW_TOKENS_FIELD, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new AmazonBedrockChatCompletionRequestTaskSettings(temperature, topP, topK, maxNewTokens); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettings.java new file mode 100644 index 0000000000000..fc3d09c6eea7a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettings.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class AmazonBedrockChatCompletionServiceSettings extends AmazonBedrockServiceSettings { + public static final String NAME = "amazon_bedrock_chat_completion_service_settings"; + + public static AmazonBedrockChatCompletionServiceSettings fromMap( + Map serviceSettings, + ConfigurationParseContext context + ) { + ValidationException validationException = new ValidationException(); + + var baseSettings = AmazonBedrockServiceSettings.fromMap(serviceSettings, validationException, context); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new AmazonBedrockChatCompletionServiceSettings( + baseSettings.region(), + baseSettings.model(), + baseSettings.provider(), + baseSettings.rateLimitSettings() + ); + } + + public AmazonBedrockChatCompletionServiceSettings( + String region, + String model, + AmazonBedrockProvider provider, + RateLimitSettings rateLimitSettings + ) { + super(region, model, provider, rateLimitSettings); + } + + public AmazonBedrockChatCompletionServiceSettings(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + super.addBaseXContent(builder, params); + builder.endObject(); + return builder; + } + + @Override + protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException { + super.addXContentFragmentOfExposedFields(builder, params); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AmazonBedrockChatCompletionServiceSettings that = (AmazonBedrockChatCompletionServiceSettings) o; + + return Objects.equals(region, that.region) + && Objects.equals(provider, that.provider) + && Objects.equals(model, that.model) + && Objects.equals(rateLimitSettings, that.rateLimitSettings); + } + + @Override + public int hashCode() { + return Objects.hash(region, model, provider, rateLimitSettings); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettings.java new file mode 100644 index 0000000000000..e689e68794e1f --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettings.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.TransportVersions.ML_INFERENCE_AMAZON_BEDROCK_ADDED; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalDoubleInRange; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_NEW_TOKENS_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_TEMPERATURE_TOP_P_TOP_K_VALUE; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MIN_TEMPERATURE_TOP_P_TOP_K_VALUE; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_P_FIELD; + +public class AmazonBedrockChatCompletionTaskSettings implements TaskSettings { + public static final String NAME = "amazon_bedrock_chat_completion_task_settings"; + + public static final AmazonBedrockChatCompletionRequestTaskSettings EMPTY_SETTINGS = new AmazonBedrockChatCompletionRequestTaskSettings( + null, + null, + null, + null + ); + + public static AmazonBedrockChatCompletionTaskSettings fromMap(Map settings) { + ValidationException validationException = new ValidationException(); + + Double temperature = extractOptionalDoubleInRange( + settings, + TEMPERATURE_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + Double topP = extractOptionalDoubleInRange( + settings, + TOP_P_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + Double topK = extractOptionalDoubleInRange( + settings, + TOP_K_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + Integer maxNewTokens = extractOptionalPositiveInteger( + settings, + MAX_NEW_TOKENS_FIELD, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new AmazonBedrockChatCompletionTaskSettings(temperature, topP, topK, maxNewTokens); + } + + public static AmazonBedrockChatCompletionTaskSettings of( + AmazonBedrockChatCompletionTaskSettings originalSettings, + AmazonBedrockChatCompletionRequestTaskSettings requestSettings + ) { + var temperature = requestSettings.temperature() == null ? 
originalSettings.temperature() : requestSettings.temperature(); + var topP = requestSettings.topP() == null ? originalSettings.topP() : requestSettings.topP(); + var topK = requestSettings.topK() == null ? originalSettings.topK() : requestSettings.topK(); + var maxNewTokens = requestSettings.maxNewTokens() == null ? originalSettings.maxNewTokens() : requestSettings.maxNewTokens(); + + return new AmazonBedrockChatCompletionTaskSettings(temperature, topP, topK, maxNewTokens); + } + + private final Double temperature; + private final Double topP; + private final Double topK; + private final Integer maxNewTokens; + + public AmazonBedrockChatCompletionTaskSettings( + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxNewTokens + ) { + this.temperature = temperature; + this.topP = topP; + this.topK = topK; + this.maxNewTokens = maxNewTokens; + } + + public AmazonBedrockChatCompletionTaskSettings(StreamInput in) throws IOException { + this.temperature = in.readOptionalDouble(); + this.topP = in.readOptionalDouble(); + this.topK = in.readOptionalDouble(); + this.maxNewTokens = in.readOptionalVInt(); + } + + public Double temperature() { + return temperature; + } + + public Double topP() { + return topP; + } + + public Double topK() { + return topK; + } + + public Integer maxNewTokens() { + return maxNewTokens; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return ML_INFERENCE_AMAZON_BEDROCK_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalDouble(temperature); + out.writeOptionalDouble(topP); + out.writeOptionalDouble(topK); + out.writeOptionalVInt(maxNewTokens); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + if (temperature != null) { + builder.field(TEMPERATURE_FIELD, temperature); + } + if (topP != null) { + builder.field(TOP_P_FIELD, topP); + } + if (topK != null) { + builder.field(TOP_K_FIELD, topK); + } + if (maxNewTokens != null) { + builder.field(MAX_NEW_TOKENS_FIELD, maxNewTokens); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AmazonBedrockChatCompletionTaskSettings that = (AmazonBedrockChatCompletionTaskSettings) o; + return Objects.equals(temperature, that.temperature) + && Objects.equals(topP, that.topP) + && Objects.equals(topK, that.topK) + && Objects.equals(maxNewTokens, that.maxNewTokens); + } + + @Override + public int hashCode() { + return Objects.hash(temperature, topP, topK, maxNewTokens); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModel.java new file mode 100644 index 0000000000000..0e3a954a03279 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModel.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
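+// Illustrative sketch only (literal values are placeholders, not part of this change): request-level task
+// settings override the persisted ones field by field, falling back to the stored value whenever a field is
+// absent from the request, e.g.
+//
+//     var persisted = new AmazonBedrockChatCompletionTaskSettings(0.5, 0.9, null, 256);
+//     var overrides = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of("temperature", 0.2)));
+//     var merged = AmazonBedrockChatCompletionTaskSettings.of(persisted, overrides);
+//     // merged carries temperature 0.2 from the request and topP 0.9, topK null, maxNewTokens 256 from the original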
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.inference.EmptyTaskSettings; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.amazonbedrock.AmazonBedrockActionVisitor; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; + +import java.util.Map; + +public class AmazonBedrockEmbeddingsModel extends AmazonBedrockModel { + + public static AmazonBedrockEmbeddingsModel of(AmazonBedrockEmbeddingsModel embeddingsModel, Map taskSettings) { + if (taskSettings != null && taskSettings.isEmpty() == false) { + // no task settings allowed + var validationException = new ValidationException(); + validationException.addValidationError("Amazon Bedrock embeddings model cannot have task settings"); + throw validationException; + } + + return embeddingsModel; + } + + public AmazonBedrockEmbeddingsModel( + String inferenceEntityId, + TaskType taskType, + String service, + Map serviceSettings, + Map taskSettings, + Map secretSettings, + ConfigurationParseContext context + ) { + this( + inferenceEntityId, + taskType, + service, + AmazonBedrockEmbeddingsServiceSettings.fromMap(serviceSettings, context), + new EmptyTaskSettings(), + AmazonBedrockSecretSettings.fromMap(secretSettings) + ); + } + + public AmazonBedrockEmbeddingsModel( + String inferenceEntityId, + TaskType taskType, + String service, + AmazonBedrockEmbeddingsServiceSettings serviceSettings, + TaskSettings taskSettings, + AmazonBedrockSecretSettings secrets + ) { + super( + new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, new EmptyTaskSettings()), + new ModelSecrets(secrets) + ); + } + + public AmazonBedrockEmbeddingsModel(Model model, ServiceSettings serviceSettings) { + super(model, serviceSettings); + } + + @Override + public ExecutableAction accept(AmazonBedrockActionVisitor creator, Map taskSettings) { + return creator.create(this, taskSettings); + } + + @Override + public AmazonBedrockEmbeddingsServiceSettings getServiceSettings() { + return (AmazonBedrockEmbeddingsServiceSettings) super.getServiceSettings(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettings.java new file mode 100644 index 0000000000000..4bf037558c618 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettings.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
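+// Note: embeddings endpoints for this service accept no task settings; AmazonBedrockEmbeddingsModel.of(...)
+// above rejects any non-empty task settings map with a ValidationException and always stores EmptyTaskSettings.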
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceFields.DIMENSIONS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; + +public class AmazonBedrockEmbeddingsServiceSettings extends AmazonBedrockServiceSettings { + public static final String NAME = "amazon_bedrock_embeddings_service_settings"; + static final String DIMENSIONS_SET_BY_USER = "dimensions_set_by_user"; + + private final Integer dimensions; + private final Boolean dimensionsSetByUser; + private final Integer maxInputTokens; + private final SimilarityMeasure similarity; + + public static AmazonBedrockEmbeddingsServiceSettings fromMap(Map map, ConfigurationParseContext context) { + ValidationException validationException = new ValidationException(); + + var settings = embeddingSettingsFromMap(map, validationException, context); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return settings; + } + + private static AmazonBedrockEmbeddingsServiceSettings embeddingSettingsFromMap( + Map map, + ValidationException validationException, + ConfigurationParseContext context + ) { + var baseSettings = AmazonBedrockServiceSettings.fromMap(map, validationException, context); + SimilarityMeasure similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); + + Integer maxTokens = extractOptionalPositiveInteger( + map, + MAX_INPUT_TOKENS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); + Integer dims = extractOptionalPositiveInteger(map, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, validationException); + + Boolean dimensionsSetByUser = extractOptionalBoolean(map, DIMENSIONS_SET_BY_USER, validationException); + + switch (context) { + case REQUEST -> { + if (dimensionsSetByUser != null) { + validationException.addValidationError( + ServiceUtils.invalidSettingError(DIMENSIONS_SET_BY_USER, ModelConfigurations.SERVICE_SETTINGS) + ); + } + + if (dims != null) { + 
validationException.addValidationError( + ServiceUtils.invalidSettingError(DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS) + ); + } + dimensionsSetByUser = false; + } + case PERSISTENT -> { + if (dimensionsSetByUser == null) { + validationException.addValidationError( + ServiceUtils.missingSettingErrorMsg(DIMENSIONS_SET_BY_USER, ModelConfigurations.SERVICE_SETTINGS) + ); + } + } + } + return new AmazonBedrockEmbeddingsServiceSettings( + baseSettings.region(), + baseSettings.model(), + baseSettings.provider(), + dims, + dimensionsSetByUser, + maxTokens, + similarity, + baseSettings.rateLimitSettings() + ); + } + + public AmazonBedrockEmbeddingsServiceSettings(StreamInput in) throws IOException { + super(in); + dimensions = in.readOptionalVInt(); + dimensionsSetByUser = in.readBoolean(); + maxInputTokens = in.readOptionalVInt(); + similarity = in.readOptionalEnum(SimilarityMeasure.class); + } + + public AmazonBedrockEmbeddingsServiceSettings( + String region, + String model, + AmazonBedrockProvider provider, + @Nullable Integer dimensions, + Boolean dimensionsSetByUser, + @Nullable Integer maxInputTokens, + @Nullable SimilarityMeasure similarity, + RateLimitSettings rateLimitSettings + ) { + super(region, model, provider, rateLimitSettings); + this.dimensions = dimensions; + this.dimensionsSetByUser = dimensionsSetByUser; + this.maxInputTokens = maxInputTokens; + this.similarity = similarity; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalVInt(dimensions); + out.writeBoolean(dimensionsSetByUser); + out.writeOptionalVInt(maxInputTokens); + out.writeOptionalEnum(similarity); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + super.addBaseXContent(builder, params); + builder.field(DIMENSIONS_SET_BY_USER, dimensionsSetByUser); + + builder.endObject(); + return builder; + } + + @Override + protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException { + super.addXContentFragmentOfExposedFields(builder, params); + + if (dimensions != null) { + builder.field(DIMENSIONS, dimensions); + } + if (maxInputTokens != null) { + builder.field(MAX_INPUT_TOKENS, maxInputTokens); + } + if (similarity != null) { + builder.field(SIMILARITY, similarity); + } + + return builder; + } + + @Override + public SimilarityMeasure similarity() { + return similarity; + } + + @Override + public Integer dimensions() { + return dimensions; + } + + public boolean dimensionsSetByUser() { + return this.dimensionsSetByUser; + } + + public Integer maxInputTokens() { + return maxInputTokens; + } + + @Override + public DenseVectorFieldMapper.ElementType elementType() { + return DenseVectorFieldMapper.ElementType.FLOAT; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AmazonBedrockEmbeddingsServiceSettings that = (AmazonBedrockEmbeddingsServiceSettings) o; + + return Objects.equals(region, that.region) + && Objects.equals(provider, that.provider) + && Objects.equals(model, that.model) + && Objects.equals(dimensions, that.dimensions) + && Objects.equals(dimensionsSetByUser, that.dimensionsSetByUser) + && Objects.equals(maxInputTokens, that.maxInputTokens) + && Objects.equals(similarity, that.similarity) + && Objects.equals(rateLimitSettings, 
that.rateLimitSettings); + } + + @Override + public int hashCode() { + return Objects.hash(region, model, provider, dimensions, dimensionsSetByUser, maxInputTokens, similarity, rateLimitSettings); + } + +} diff --git a/x-pack/plugin/inference/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/inference/src/main/plugin-metadata/plugin-security.policy index f21a46521a7f7..a39fcf53be7f3 100644 --- a/x-pack/plugin/inference/src/main/plugin-metadata/plugin-security.policy +++ b/x-pack/plugin/inference/src/main/plugin-metadata/plugin-security.policy @@ -8,12 +8,18 @@ grant { // required by: com.google.api.client.json.JsonParser#parseValue + // also required by AWS SDK for client configuration permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.RuntimePermission "getClassLoader"; + // required by: com.google.api.client.json.GenericJson# + // also by AWS SDK for Jackson's ObjectMapper permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + // required to add google certs to the gcs client trustore permission java.lang.RuntimePermission "setFactory"; // gcs client opens socket connections for to access repository - permission java.net.SocketPermission "*", "connect"; + // also, AWS Bedrock client opens socket connections and needs resolve for to access to resources + permission java.net.SocketPermission "*", "connect,resolve"; }; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java new file mode 100644 index 0000000000000..87d3a82b4aae6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; +import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockMockRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModelTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectationFloat; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockActionCreatorTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + threadPool = createThreadPool(inferenceUtilityPool()); + } + + @After + public void shutdown() throws IOException { + terminate(threadPool); + } + + public void testEmbeddingsRequestAction() throws IOException { + var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool); + var mockedFloatResults = List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.0123F, -0.0123F })); + var mockedResult = new InferenceTextEmbeddingFloatResults(mockedFloatResults); + try (var sender = new AmazonBedrockMockRequestSender()) { + sender.enqueue(mockedResult); + var creator = new AmazonBedrockActionCreator(sender, serviceComponents, TIMEOUT); + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + null, + null, + null, + "accesskey", + "secretkey" + ); + var action = creator.create(model, Map.of()); + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectationFloat(List.of(new float[] { 0.0123F, -0.0123F })))); + + assertThat(sender.sendCount(), is(1)); + var sentInputs = sender.getInputs(); + assertThat(sentInputs.size(), is(1)); + assertThat(sentInputs.get(0), is("abc")); + } + } + + public void testEmbeddingsRequestAction_HandlesException() throws IOException { + var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool); 
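+        // enqueuing an Exception (rather than an InferenceServiceResults) makes the mock sender fail the listener with it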
+ var mockedResult = new ElasticsearchException("mock exception"); + try (var sender = new AmazonBedrockMockRequestSender()) { + sender.enqueue(mockedResult); + var creator = new AmazonBedrockActionCreator(sender, serviceComponents, TIMEOUT); + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + var action = creator.create(model, Map.of()); + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(sender.sendCount(), is(1)); + assertThat(sender.getInputs().size(), is(1)); + assertThat(thrownException.getMessage(), is("mock exception")); + } + } + + public void testCompletionRequestAction() throws IOException { + var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool); + var mockedChatCompletionResults = List.of(new ChatCompletionResults.Result("test input string")); + var mockedResult = new ChatCompletionResults(mockedChatCompletionResults); + try (var sender = new AmazonBedrockMockRequestSender()) { + sender.enqueue(mockedResult); + var creator = new AmazonBedrockActionCreator(sender, serviceComponents, TIMEOUT); + var model = AmazonBedrockChatCompletionModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + null, + null, + null, + "accesskey", + "secretkey" + ); + var action = creator.create(model, Map.of()); + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectationCompletion(List.of("test input string")))); + + assertThat(sender.sendCount(), is(1)); + var sentInputs = sender.getInputs(); + assertThat(sentInputs.size(), is(1)); + assertThat(sentInputs.get(0), is("abc")); + } + } + + public void testChatCompletionRequestAction_HandlesException() throws IOException { + var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool); + var mockedResult = new ElasticsearchException("mock exception"); + try (var sender = new AmazonBedrockMockRequestSender()) { + sender.enqueue(mockedResult); + var creator = new AmazonBedrockActionCreator(sender, serviceComponents, TIMEOUT); + var model = AmazonBedrockChatCompletionModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + null, + null, + null, + "accesskey", + "secretkey" + ); + var action = creator.create(model, Map.of()); + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(sender.sendCount(), is(1)); + assertThat(sender.getInputs().size(), is(1)); + assertThat(thrownException.getMessage(), is("mock exception")); + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutorTests.java 
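// Illustrative sketch only: one way the visitor wiring in AmazonBedrockEmbeddingsModel#accept, exercised by the
// tests above, could be driven end to end. The sender, service components, model and timeout are assumed to be
// supplied by the caller, and AmazonBedrockActionCreator is assumed to be the AmazonBedrockActionVisitor
// implementation, as it is used in AmazonBedrockActionCreatorTests.
static InferenceServiceResults embedViaVisitor(
    Sender sender,
    ServiceComponents serviceComponents,
    AmazonBedrockEmbeddingsModel model,
    TimeValue timeout
) {
    var creator = new AmazonBedrockActionCreator(sender, serviceComponents, timeout);
    // accept(...) delegates to creator.create(model, taskSettings); task settings stay empty because
    // AmazonBedrockEmbeddingsModel.of rejects any non-empty task settings
    var action = model.accept(creator, Map.of());
    var listener = new PlainActionFuture<InferenceServiceResults>();
    action.execute(new DocumentsOnlyInput(List.of("some text")), InferenceAction.Request.DEFAULT_TIMEOUT, listener);
    return listener.actionGet(timeout);
}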
new file mode 100644 index 0000000000000..9326d39cb657c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutorTests.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.services.bedrockruntime.model.ContentBlock; +import com.amazonaws.services.bedrockruntime.model.ConverseOutput; +import com.amazonaws.services.bedrockruntime.model.ConverseResult; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; +import com.amazonaws.services.bedrockruntime.model.Message; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockTitanCompletionRequestEntity; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockTitanEmbeddingsRequestEntity; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion.AmazonBedrockChatCompletionResponseHandler; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings.AmazonBedrockEmbeddingsResponseHandler; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModelTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; + +import java.nio.CharBuffer; +import java.nio.charset.CharacterCodingException; +import java.nio.charset.Charset; +import java.util.List; + +import static org.elasticsearch.xpack.inference.common.TruncatorTests.createTruncator; +import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectationFloat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockExecutorTests extends ESTestCase { + public void testExecute_EmbeddingsRequest_ForAmazonTitan() throws CharacterCodingException { + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + var truncator = createTruncator(); + var truncatedInput = truncator.truncate(List.of("abc")); + var requestEntity = new AmazonBedrockTitanEmbeddingsRequestEntity("abc"); + var request = new AmazonBedrockEmbeddingsRequest(truncator, truncatedInput, model, requestEntity, null); + var responseHandler = new AmazonBedrockEmbeddingsResponseHandler(); + + var clientCache = new AmazonBedrockMockClientCache(null, getTestInvokeResult(TEST_AMAZON_TITAN_EMBEDDINGS_RESULT), null); + var listener 
= new PlainActionFuture(); + + var executor = new AmazonBedrockEmbeddingsExecutor(request, responseHandler, logger, () -> false, listener, clientCache); + executor.run(); + var result = listener.actionGet(new TimeValue(30000)); + assertNotNull(result); + assertThat(result.asMap(), is(buildExpectationFloat(List.of(new float[] { 0.123F, 0.456F, 0.678F, 0.789F })))); + } + + public void testExecute_EmbeddingsRequest_ForCohere() throws CharacterCodingException { + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.COHERE, + "accesskey", + "secretkey" + ); + var requestEntity = new AmazonBedrockTitanEmbeddingsRequestEntity("abc"); + var truncator = createTruncator(); + var truncatedInput = truncator.truncate(List.of("abc")); + var request = new AmazonBedrockEmbeddingsRequest(truncator, truncatedInput, model, requestEntity, null); + var responseHandler = new AmazonBedrockEmbeddingsResponseHandler(); + + var clientCache = new AmazonBedrockMockClientCache(null, getTestInvokeResult(TEST_COHERE_EMBEDDINGS_RESULT), null); + var listener = new PlainActionFuture(); + + var executor = new AmazonBedrockEmbeddingsExecutor(request, responseHandler, logger, () -> false, listener, clientCache); + executor.run(); + var result = listener.actionGet(new TimeValue(30000)); + assertNotNull(result); + assertThat(result.asMap(), is(buildExpectationFloat(List.of(new float[] { 0.123F, 0.456F, 0.678F, 0.789F })))); + } + + public void testExecute_ChatCompletionRequest() throws CharacterCodingException { + var model = AmazonBedrockChatCompletionModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + + var requestEntity = new AmazonBedrockTitanCompletionRequestEntity(List.of("abc"), null, null, 512); + var request = new AmazonBedrockChatCompletionRequest(model, requestEntity, null); + var responseHandler = new AmazonBedrockChatCompletionResponseHandler(); + + var clientCache = new AmazonBedrockMockClientCache(getTestConverseResult("converse result"), null, null); + var listener = new PlainActionFuture(); + + var executor = new AmazonBedrockChatCompletionExecutor(request, responseHandler, logger, () -> false, listener, clientCache); + executor.run(); + var result = listener.actionGet(new TimeValue(30000)); + assertNotNull(result); + assertThat(result.asMap(), is(buildExpectationCompletion(List.of("converse result")))); + } + + public void testExecute_FailsProperly_WithElasticsearchException() { + var model = AmazonBedrockChatCompletionModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + + var requestEntity = new AmazonBedrockTitanCompletionRequestEntity(List.of("abc"), null, null, 512); + var request = new AmazonBedrockChatCompletionRequest(model, requestEntity, null); + var responseHandler = new AmazonBedrockChatCompletionResponseHandler(); + + var clientCache = new AmazonBedrockMockClientCache(null, null, new ElasticsearchException("test exception")); + var listener = new PlainActionFuture(); + + var executor = new AmazonBedrockChatCompletionExecutor(request, responseHandler, logger, () -> false, listener, clientCache); + executor.run(); + + var exceptionThrown = assertThrows(ElasticsearchException.class, () -> listener.actionGet(new TimeValue(30000))); + assertThat(exceptionThrown.getMessage(), containsString("Failed to send request from inference entity id [id]")); + 
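+        // the cache's configured exception should surface as the cause of the wrapped send failure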
assertThat(exceptionThrown.getCause().getMessage(), containsString("test exception")); + } + + public static ConverseResult getTestConverseResult(String resultText) { + var message = new Message().withContent(new ContentBlock().withText(resultText)); + var converseOutput = new ConverseOutput().withMessage(message); + return new ConverseResult().withOutput(converseOutput); + } + + public static InvokeModelResult getTestInvokeResult(String resultJson) throws CharacterCodingException { + var result = new InvokeModelResult(); + result.setContentType("application/json"); + var encoder = Charset.forName("UTF-8").newEncoder(); + result.setBody(encoder.encode(CharBuffer.wrap(resultJson))); + return result; + } + + public static final String TEST_AMAZON_TITAN_EMBEDDINGS_RESULT = """ + { + "embedding": [0.123, 0.456, 0.678, 0.789], + "inputTextTokenCount": int + }"""; + + public static final String TEST_COHERE_EMBEDDINGS_RESULT = """ + { + "embeddings": [ + [0.123, 0.456, 0.678, 0.789] + ], + "id": string, + "response_type" : "embeddings_floats", + "texts": [string] + } + """; +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java new file mode 100644 index 0000000000000..873b2e22497c6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; + +import java.io.IOException; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.time.ZoneId; + +import static org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockInferenceClient.CLIENT_CACHE_EXPIRY_MINUTES; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; + +public class AmazonBedrockInferenceClientCacheTests extends ESTestCase { + public void testCache_ReturnsSameObject() throws IOException { + AmazonBedrockInferenceClientCache cacheInstance; + try (var cache = new AmazonBedrockInferenceClientCache(AmazonBedrockMockInferenceClient::create, null)) { + cacheInstance = cache; + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "inferenceId", + "testregion", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "access_key", + "secret_key" + ); + + var client = cache.getOrCreateClient(model, null); + + var secondModel = AmazonBedrockEmbeddingsModelTests.createModel( + "inferenceId_two", + "testregion", + "a_different_model", + AmazonBedrockProvider.COHERE, + "access_key", + "secret_key" + ); + + var secondClient = cache.getOrCreateClient(secondModel, null); + assertThat(client, sameInstance(secondClient)); + + assertThat(cache.clientCount(), is(1)); + + var thirdClient = cache.getOrCreateClient(model, null); + assertThat(client, sameInstance(thirdClient)); + + assertThat(cache.clientCount(), is(1)); + } + assertThat(cacheInstance.clientCount(), is(0)); + } + + public void testCache_ItEvictsExpiredClients() throws IOException { + var clock = Clock.fixed(Instant.now(), ZoneId.systemDefault()); + AmazonBedrockInferenceClientCache cacheInstance; + try (var cache = new AmazonBedrockInferenceClientCache(AmazonBedrockMockInferenceClient::create, clock)) { + cacheInstance = cache; + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "inferenceId", + "testregion", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "access_key", + "secret_key" + ); + + var client = cache.getOrCreateClient(model, null); + + var secondModel = AmazonBedrockEmbeddingsModelTests.createModel( + "inferenceId_two", + "some_other_region", + "a_different_model", + AmazonBedrockProvider.COHERE, + "other_access_key", + "other_secret_key" + ); + + assertThat(cache.clientCount(), is(1)); + + var secondClient = cache.getOrCreateClient(secondModel, null); + assertThat(client, not(sameInstance(secondClient))); + + assertThat(cache.clientCount(), is(2)); + + // set clock to after expiry + cache.setClock(Clock.fixed(clock.instant().plus(Duration.ofMinutes(CLIENT_CACHE_EXPIRY_MINUTES + 1)), ZoneId.systemDefault())); + + // get another client, this will ensure flushExpiredClients is called + var regetSecondClient = cache.getOrCreateClient(secondModel, null); + assertThat(secondClient, sameInstance(regetSecondClient)); + + var regetFirstClient = cache.getOrCreateClient(model, null); + assertThat(client, not(sameInstance(regetFirstClient))); + } + assertThat(cacheInstance.clientCount(), is(0)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockClientCache.java 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockClientCache.java new file mode 100644 index 0000000000000..912967a9012d7 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockClientCache.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.services.bedrockruntime.model.ConverseResult; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.io.IOException; + +public class AmazonBedrockMockClientCache implements AmazonBedrockClientCache { + private ConverseResult converseResult = null; + private InvokeModelResult invokeModelResult = null; + private ElasticsearchException exceptionToThrow = null; + + public AmazonBedrockMockClientCache() {} + + public AmazonBedrockMockClientCache( + @Nullable ConverseResult converseResult, + @Nullable InvokeModelResult invokeModelResult, + @Nullable ElasticsearchException exceptionToThrow + ) { + this.converseResult = converseResult; + this.invokeModelResult = invokeModelResult; + this.exceptionToThrow = exceptionToThrow; + } + + @Override + public AmazonBedrockBaseClient getOrCreateClient(AmazonBedrockModel model, TimeValue timeout) { + var client = (AmazonBedrockMockInferenceClient) AmazonBedrockMockInferenceClient.create(model, timeout); + client.setConverseResult(converseResult); + client.setInvokeModelResult(invokeModelResult); + client.setExceptionToThrow(exceptionToThrow); + return client; + } + + @Override + public void close() throws IOException { + // nothing to do + } + + public void setConverseResult(ConverseResult converseResult) { + this.converseResult = converseResult; + } + + public void setInvokeModelResult(InvokeModelResult invokeModelResult) { + this.invokeModelResult = invokeModelResult; + } + + public void setExceptionToThrow(ElasticsearchException exceptionToThrow) { + this.exceptionToThrow = exceptionToThrow; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockExecuteRequestSender.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockExecuteRequestSender.java new file mode 100644 index 0000000000000..b0df8a40e2551 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockExecuteRequestSender.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.services.bedrockruntime.model.ConverseResult; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +import java.util.List; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.function.Supplier; + +public class AmazonBedrockMockExecuteRequestSender extends AmazonBedrockExecuteOnlyRequestSender { + + private Queue results = new ConcurrentLinkedQueue<>(); + private Queue> inputs = new ConcurrentLinkedQueue<>(); + private int sendCounter = 0; + + public AmazonBedrockMockExecuteRequestSender(AmazonBedrockClientCache clientCache, ThrottlerManager throttlerManager) { + super(clientCache, throttlerManager); + } + + public void enqueue(Object result) { + results.add(result); + } + + public int sendCount() { + return sendCounter; + } + + public List getInputs() { + return inputs.remove(); + } + + @Override + protected AmazonBedrockExecutor createExecutor( + AmazonBedrockRequest awsRequest, + AmazonBedrockResponseHandler awsResponse, + Logger logger, + Supplier hasRequestTimedOutFunction, + ActionListener listener + ) { + setCacheResult(); + return super.createExecutor(awsRequest, awsResponse, logger, hasRequestTimedOutFunction, listener); + } + + private void setCacheResult() { + var mockCache = (AmazonBedrockMockClientCache) this.clientCache; + var result = results.remove(); + if (result instanceof ConverseResult converseResult) { + mockCache.setConverseResult(converseResult); + return; + } + + if (result instanceof InvokeModelResult invokeModelResult) { + mockCache.setInvokeModelResult(invokeModelResult); + return; + } + + if (result instanceof ElasticsearchException exception) { + mockCache.setExceptionToThrow(exception); + return; + } + + throw new RuntimeException("Unknown result type: " + result.getClass()); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockInferenceClient.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockInferenceClient.java new file mode 100644 index 0000000000000..dcbf8dfcbff01 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockInferenceClient.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.services.bedrockruntime.AmazonBedrockRuntimeAsync; +import com.amazonaws.services.bedrockruntime.model.ConverseResult; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; + +public class AmazonBedrockMockInferenceClient extends AmazonBedrockInferenceClient { + private ConverseResult converseResult = null; + private InvokeModelResult invokeModelResult = null; + private ElasticsearchException exceptionToThrow = null; + + private Future converseResultFuture = new MockConverseResultFuture(); + private Future invokeModelResultFuture = new MockInvokeResultFuture(); + + public static AmazonBedrockBaseClient create(AmazonBedrockModel model, @Nullable TimeValue timeout) { + return new AmazonBedrockMockInferenceClient(model, timeout); + } + + protected AmazonBedrockMockInferenceClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + super(model, timeout); + } + + public void setExceptionToThrow(ElasticsearchException exceptionToThrow) { + this.exceptionToThrow = exceptionToThrow; + } + + public void setConverseResult(ConverseResult result) { + this.converseResult = result; + } + + public void setInvokeModelResult(InvokeModelResult result) { + this.invokeModelResult = result; + } + + @Override + protected AmazonBedrockRuntimeAsync createAmazonBedrockClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + var runtimeClient = mock(AmazonBedrockRuntimeAsync.class); + doAnswer(invocation -> invokeModelResultFuture).when(runtimeClient).invokeModelAsync(any()); + doAnswer(invocation -> converseResultFuture).when(runtimeClient).converseAsync(any()); + + return runtimeClient; + } + + @Override + void close() {} + + private class MockConverseResultFuture implements Future { + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return false; + } + + @Override + public ConverseResult get() throws InterruptedException, ExecutionException { + if (exceptionToThrow != null) { + throw exceptionToThrow; + } + return converseResult; + } + + @Override + public ConverseResult get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + if (exceptionToThrow != null) { + throw exceptionToThrow; + } + return converseResult; + } + } + + private class MockInvokeResultFuture implements Future { + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return false; + } + + @Override + public InvokeModelResult get() throws InterruptedException, ExecutionException { + if (exceptionToThrow != null) { + throw exceptionToThrow; + } + return invokeModelResult; + } + + @Override + public InvokeModelResult get(long timeout, TimeUnit unit) throws 
InterruptedException, ExecutionException, TimeoutException { + if (exceptionToThrow != null) { + throw exceptionToThrow; + } + return invokeModelResult; + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java new file mode 100644 index 0000000000000..e68beaf4c1eb5 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.RequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; + +import java.io.IOException; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; + +public class AmazonBedrockMockRequestSender implements Sender { + + public static class Factory extends AmazonBedrockRequestSender.Factory { + private final Sender sender; + + public Factory(ServiceComponents serviceComponents, ClusterService clusterService) { + super(serviceComponents, clusterService); + this.sender = new AmazonBedrockMockRequestSender(); + } + + public Sender createSender() { + return sender; + } + } + + private Queue results = new ConcurrentLinkedQueue<>(); + private Queue> inputs = new ConcurrentLinkedQueue<>(); + private int sendCounter = 0; + + public void enqueue(Object result) { + results.add(result); + } + + public int sendCount() { + return sendCounter; + } + + public List getInputs() { + return inputs.remove(); + } + + @Override + public void start() { + // do nothing + } + + @Override + public void send( + RequestManager requestCreator, + InferenceInputs inferenceInputs, + TimeValue timeout, + ActionListener listener + ) { + sendCounter++; + var docsInput = (DocumentsOnlyInput) inferenceInputs; + inputs.add(docsInput.getInputs()); + + if (results.isEmpty()) { + listener.onFailure(new ElasticsearchException("No results found")); + } else { + var resultObject = results.remove(); + if (resultObject instanceof InferenceServiceResults inferenceResult) { + listener.onResponse(inferenceResult); + } else if (resultObject instanceof Exception e) { + listener.onFailure(e); + } else { + throw new RuntimeException("Unknown result type: " + resultObject.getClass()); + } + } + } + + @Override + public void close() throws IOException { + // do nothing + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java new file mode 100644 index 0000000000000..7fa8a09d5bf12 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockChatCompletionRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockEmbeddingsRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModelTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockExecutorTests.TEST_AMAZON_TITAN_EMBEDDINGS_RESULT; +import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectationFloat; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class AmazonBedrockRequestSenderTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private ThreadPool threadPool; + private final AtomicReference threadRef = new AtomicReference<>(); + + @Before + public void init() throws Exception { + threadPool = createThreadPool(inferenceUtilityPool()); + threadRef.set(null); + } + + @After + public void shutdown() throws IOException, InterruptedException { + if (threadRef.get() != null) { + threadRef.get().join(TIMEOUT.millis()); + } + + terminate(threadPool); + } + + public void testCreateSender_SendsEmbeddingsRequestAndReceivesResponse() throws Exception { + var senderFactory = createSenderFactory(threadPool, Settings.EMPTY); + var requestSender = new AmazonBedrockMockExecuteRequestSender(new AmazonBedrockMockClientCache(), mock(ThrottlerManager.class)); + 
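+        // the mock execute sender copies each enqueued result into its AmazonBedrockMockClientCache just before building the executor (see setCacheResult above)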
requestSender.enqueue(AmazonBedrockExecutorTests.getTestInvokeResult(TEST_AMAZON_TITAN_EMBEDDINGS_RESULT)); + try (var sender = createSender(senderFactory, requestSender)) { + sender.start(); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool); + var requestManager = new AmazonBedrockEmbeddingsRequestManager( + model, + serviceComponents.truncator(), + threadPool, + new TimeValue(30, TimeUnit.SECONDS) + ); + sender.send(requestManager, new DocumentsOnlyInput(List.of("abc")), null, listener); + + var result = listener.actionGet(TIMEOUT); + assertThat(result.asMap(), is(buildExpectationFloat(List.of(new float[] { 0.123F, 0.456F, 0.678F, 0.789F })))); + } + } + + public void testCreateSender_SendsCompletionRequestAndReceivesResponse() throws Exception { + var senderFactory = createSenderFactory(threadPool, Settings.EMPTY); + var requestSender = new AmazonBedrockMockExecuteRequestSender(new AmazonBedrockMockClientCache(), mock(ThrottlerManager.class)); + requestSender.enqueue(AmazonBedrockExecutorTests.getTestConverseResult("test response text")); + try (var sender = createSender(senderFactory, requestSender)) { + sender.start(); + + var model = AmazonBedrockChatCompletionModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + var requestManager = new AmazonBedrockChatCompletionRequestManager(model, threadPool, new TimeValue(30, TimeUnit.SECONDS)); + sender.send(requestManager, new DocumentsOnlyInput(List.of("abc")), null, listener); + + var result = listener.actionGet(TIMEOUT); + assertThat(result.asMap(), is(buildExpectationCompletion(List.of("test response text")))); + } + } + + public static AmazonBedrockRequestSender.Factory createSenderFactory(ThreadPool threadPool, Settings settings) { + return new AmazonBedrockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, settings), + mockClusterServiceEmpty() + ); + } + + public static Sender createSender(AmazonBedrockRequestSender.Factory factory, AmazonBedrockExecuteOnlyRequestSender requestSender) { + return factory.createSender(requestSender); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..b91aab5410048 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntityTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockAI21LabsCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockAI21LabsCompletionRequestEntity(List.of("test message"), null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockAI21LabsCompletionRequestEntity(List.of("test message"), 1.0, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockAI21LabsCompletionRequestEntity(List.of("test message"), null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + 
assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockAI21LabsCompletionRequestEntity(List.of("test message"), null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..89d5fec7efba6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntityTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockAnthropicCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockAnthropicCompletionRequestEntity(List.of("test message"), null, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockAnthropicCompletionRequestEntity(List.of("test message"), 1.0, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockAnthropicCompletionRequestEntity(List.of("test message"), null, 1.0, null, null); + var builtRequest = getConverseRequest("testmodel", request); + 
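+        // only top_p is supplied here, so temperature, top_k and max tokens are expected to stay unset on the built request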
assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockAnthropicCompletionRequestEntity(List.of("test message"), null, null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopK() { + var request = new AmazonBedrockAnthropicCompletionRequestEntity(List.of("test message"), null, null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopKInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..8df5c7f32e529 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntityTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockCohereCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockCohereCompletionRequestEntity(List.of("test message"), null, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockCohereCompletionRequestEntity(List.of("test message"), 1.0, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockCohereCompletionRequestEntity(List.of("test message"), null, 1.0, null, null); + var builtRequest = getConverseRequest("testmodel", request); + 
assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockCohereCompletionRequestEntity(List.of("test message"), null, null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopK() { + var request = new AmazonBedrockCohereCompletionRequestEntity(List.of("test message"), null, null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopKInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestUtils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestUtils.java new file mode 100644 index 0000000000000..cbbe3c5554967 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestUtils.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ContentBlock; +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.Message; + +import org.elasticsearch.core.Strings; + +public final class AmazonBedrockConverseRequestUtils { + public static ConverseRequest getConverseRequest(String modelId, AmazonBedrockConverseRequestEntity requestEntity) { + var converseRequest = new ConverseRequest().withModelId(modelId); + converseRequest = requestEntity.addMessages(converseRequest); + converseRequest = requestEntity.addInferenceConfig(converseRequest); + converseRequest = requestEntity.addAdditionalModelFields(converseRequest); + return converseRequest; + } + + public static boolean doesConverseRequestHasMessage(ConverseRequest converseRequest, String expectedMessage) { + for (Message message : converseRequest.getMessages()) { + var content = message.getContent(); + for (ContentBlock contentBlock : content) { + if (contentBlock.getText().equals(expectedMessage)) { + return true; + } + } + } + return false; + } + + public static boolean doesConverseRequestHaveAnyTemperatureInput(ConverseRequest converseRequest) { + return converseRequest.getInferenceConfig() != null + && converseRequest.getInferenceConfig().getTemperature() != null + && (converseRequest.getInferenceConfig().getTemperature().isNaN() == false); + } + + public static boolean doesConverseRequestHaveAnyTopPInput(ConverseRequest converseRequest) { + return converseRequest.getInferenceConfig() != null + && converseRequest.getInferenceConfig().getTopP() != null + && (converseRequest.getInferenceConfig().getTopP().isNaN() == false); + } + + public static boolean doesConverseRequestHaveAnyMaxTokensInput(ConverseRequest converseRequest) { + return converseRequest.getInferenceConfig() != null && converseRequest.getInferenceConfig().getMaxTokens() != null; + } + + public static boolean doesConverseRequestHaveTemperatureInput(ConverseRequest converseRequest, Double temperature) { + return doesConverseRequestHaveAnyTemperatureInput(converseRequest) + && converseRequest.getInferenceConfig().getTemperature().equals(temperature.floatValue()); + } + + public static boolean doesConverseRequestHaveTopPInput(ConverseRequest converseRequest, Double topP) { + return doesConverseRequestHaveAnyTopPInput(converseRequest) + && converseRequest.getInferenceConfig().getTopP().equals(topP.floatValue()); + } + + public static boolean doesConverseRequestHaveMaxTokensInput(ConverseRequest converseRequest, Integer maxTokens) { + return doesConverseRequestHaveAnyMaxTokensInput(converseRequest) + && converseRequest.getInferenceConfig().getMaxTokens().equals(maxTokens); + } + + public static boolean doesConverseRequestHaveAnyTopKInput(ConverseRequest converseRequest) { + if (converseRequest.getAdditionalModelResponseFieldPaths() == null) { + return false; + } + + for (String fieldPath : converseRequest.getAdditionalModelResponseFieldPaths()) { + if (fieldPath.contains("{\"top_k\":")) { + return true; + } + } + return false; + } + + public static boolean doesConverseRequestHaveTopKInput(ConverseRequest converseRequest, Double topK) { + if (doesConverseRequestHaveAnyTopKInput(converseRequest) == false) { + return false; + } + + var checkString = Strings.format("{\"top_k\":%f}", topK.floatValue()); + for (String fieldPath : converseRequest.getAdditionalModelResponseFieldPaths()) { + if 
(fieldPath.contains(checkString)) { + return true; + } + } + return false; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..fa482669a0bb2 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntityTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockMetaCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockMetaCompletionRequestEntity(List.of("test message"), null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockMetaCompletionRequestEntity(List.of("test message"), 1.0, 
null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockMetaCompletionRequestEntity(List.of("test message"), null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockMetaCompletionRequestEntity(List.of("test message"), null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..788625d3702b8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntityTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockMistralCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockMistralCompletionRequestEntity(List.of("test message"), null, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockMistralCompletionRequestEntity(List.of("test message"), 1.0, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockMistralCompletionRequestEntity(List.of("test message"), null, 1.0, null, null); + var builtRequest = getConverseRequest("testmodel", request); + 
assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockMistralCompletionRequestEntity(List.of("test message"), null, null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopK() { + var request = new AmazonBedrockMistralCompletionRequestEntity(List.of("test message"), null, null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopKInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..79fa387876c8b --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntityTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockTitanCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockTitanCompletionRequestEntity(List.of("test message"), null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockTitanCompletionRequestEntity(List.of("test message"), 1.0, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockTitanCompletionRequestEntity(List.of("test message"), null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + 
assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockTitanCompletionRequestEntity(List.of("test message"), null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntityTests.java new file mode 100644 index 0000000000000..fd8114f889d6a --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntityTests.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockJsonBuilder; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockCohereEmbeddingsRequestEntityTests extends ESTestCase { + public void testRequestEntity_GeneratesExpectedJsonBody() throws IOException { + var entity = new AmazonBedrockCohereEmbeddingsRequestEntity(List.of("test input")); + var builder = new AmazonBedrockJsonBuilder(entity); + var result = builder.getStringContent(); + assertThat(result, is("{\"texts\":[\"test input\"],\"input_type\":\"search_document\"}")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntityTests.java new file mode 100644 index 0000000000000..da98fa251fdc8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntityTests.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockJsonBuilder; + +import java.io.IOException; + +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockTitanEmbeddingsRequestEntityTests extends ESTestCase { + public void testRequestEntity_GeneratesExpectedJsonBody() throws IOException { + var entity = new AmazonBedrockTitanEmbeddingsRequestEntity("test input"); + var builder = new AmazonBedrockJsonBuilder(entity); + var result = builder.getStringContent(); + assertThat(result, is("{\"inputText\":\"test input\"}")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettingsTests.java new file mode 100644 index 0000000000000..904851842a6c8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettingsTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.hamcrest.CoreMatchers; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.ACCESS_KEY_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.SECRET_KEY_FIELD; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockSecretSettingsTests extends AbstractBWCWireSerializationTestCase<AmazonBedrockSecretSettings> { + + public void testIt_CreatesSettings_ReturnsNullFromMap_null() { + var secrets = AmazonBedrockSecretSettings.fromMap(null); + assertNull(secrets); + } + + public void testIt_CreatesSettings_FromMap_WithValues() { + var secrets = AmazonBedrockSecretSettings.fromMap( + new HashMap<>(Map.of(ACCESS_KEY_FIELD, "accesstest", SECRET_KEY_FIELD, "secrettest")) + ); + assertThat( + secrets, + is(new AmazonBedrockSecretSettings(new SecureString("accesstest".toCharArray()), new SecureString("secrettest".toCharArray()))) + ); + } + + public void testIt_CreatesSettings_FromMap_IgnoresExtraKeys() { + var secrets = AmazonBedrockSecretSettings.fromMap( + new HashMap<>(Map.of(ACCESS_KEY_FIELD, "accesstest", SECRET_KEY_FIELD, "secrettest", "extrakey", "extravalue")) + ); + assertThat( + secrets, + is(new AmazonBedrockSecretSettings(new SecureString("accesstest".toCharArray()), new SecureString("secrettest".toCharArray()))) + ); + } + + public void
testIt_FromMap_ThrowsValidationException_AccessKeyMissing() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockSecretSettings.fromMap(new HashMap<>(Map.of(SECRET_KEY_FIELD, "secrettest"))) + ); + + assertThat( + thrownException.getMessage(), + containsString(Strings.format("[secret_settings] does not contain the required setting [%s]", ACCESS_KEY_FIELD)) + ); + } + + public void testIt_FromMap_ThrowsValidationException_SecretKeyMissing() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockSecretSettings.fromMap(new HashMap<>(Map.of(ACCESS_KEY_FIELD, "accesstest"))) + ); + + assertThat( + thrownException.getMessage(), + containsString(Strings.format("[secret_settings] does not contain the required setting [%s]", SECRET_KEY_FIELD)) + ); + } + + public void testToXContent_CreatesProperContent() throws IOException { + var secrets = AmazonBedrockSecretSettings.fromMap( + new HashMap<>(Map.of(ACCESS_KEY_FIELD, "accesstest", SECRET_KEY_FIELD, "secrettest")) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + secrets.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + assertThat(xContentResult, CoreMatchers.is(""" + {"access_key":"accesstest","secret_key":"secrettest"}""")); + } + + public static Map<String, Object> getAmazonBedrockSecretSettingsMap(String accessKey, String secretKey) { + return new HashMap<String, Object>(Map.of(ACCESS_KEY_FIELD, accessKey, SECRET_KEY_FIELD, secretKey)); + } + + @Override + protected AmazonBedrockSecretSettings mutateInstanceForVersion(AmazonBedrockSecretSettings instance, TransportVersion version) { + return instance; + } + + @Override + protected Writeable.Reader<AmazonBedrockSecretSettings> instanceReader() { + return AmazonBedrockSecretSettings::new; + } + + @Override + protected AmazonBedrockSecretSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AmazonBedrockSecretSettings mutateInstance(AmazonBedrockSecretSettings instance) throws IOException { + return randomValueOtherThan(instance, AmazonBedrockSecretSettingsTests::createRandom); + } + + private static AmazonBedrockSecretSettings createRandom() { + return new AmazonBedrockSecretSettings(new SecureString(randomAlphaOfLength(10).toCharArray()), new SecureString(randomAlphaOfLength(10).toCharArray())); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java new file mode 100644 index 0000000000000..00a840c8d4812 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java @@ -0,0 +1,1131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; +import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; +import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.inference.Utils; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockMockRequestSender; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModelTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionServiceSettings; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionTaskSettings; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings; +import org.hamcrest.CoreMatchers; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectationFloat; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static 
org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettingsTests.getAmazonBedrockSecretSettingsMap; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionServiceSettingsTests.createChatCompletionRequestSettingsMap; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettingsTests.createEmbeddingsRequestSettingsMap; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class AmazonBedrockServiceTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + threadPool = createThreadPool(inferenceUtilityPool()); + } + + @After + public void shutdown() throws IOException { + terminate(threadPool); + } + + public void testParseRequestConfig_CreatesAnAmazonBedrockModel() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = ActionListener.wrap(model -> { + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + }, exception -> fail("Unexpected exception: " + exception)); + + service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, null, null, null), + Map.of(), + getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = ActionListener.wrap( + model -> fail("Expected exception, but got model: " + model), + exception -> { + assertThat(exception, instanceOf(ElasticsearchStatusException.class)); + assertThat(exception.getMessage(), is("The [amazonbedrock] service does not support task type [sparse_embedding]")); + } + ); + + service.parseRequestConfig( + "id", + TaskType.SPARSE_EMBEDDING, + getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null), + Map.of(), + getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testCreateModel_ForEmbeddingsTask_InvalidProvider() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = 
ActionListener.wrap( + model -> fail("Expected exception, but got model: " + model), + exception -> { + assertThat(exception, instanceOf(ElasticsearchStatusException.class)); + assertThat(exception.getMessage(), is("The [text_embedding] task type for provider [anthropic] is not available")); + } + ); + + service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "anthropic", null, null, null, null), + Map.of(), + getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testCreateModel_TopKParameter_NotAvailable() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = ActionListener.wrap( + model -> fail("Expected exception, but got model: " + model), + exception -> { + assertThat(exception, instanceOf(ElasticsearchStatusException.class)); + assertThat(exception.getMessage(), is("The [top_k] task parameter is not available for provider [amazontitan]")); + } + ); + + service.parseRequestConfig( + "id", + TaskType.COMPLETION, + getRequestConfigMap( + createChatCompletionRequestSettingsMap("region", "model", "amazontitan"), + getChatCompletionTaskSettingsMap(1.0, 0.5, 0.2, 128), + getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { + try (var service = createAmazonBedrockService()) { + var config = getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, null, null, null), + Map.of(), + getAmazonBedrockSecretSettingsMap("access", "secret") + ); + + config.put("extra_key", "value"); + + ActionListener modelVerificationListener = ActionListener.wrap( + model -> fail("Expected exception, but got model: " + model), + exception -> { + assertThat(exception, instanceOf(ElasticsearchStatusException.class)); + assertThat( + exception.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [amazonbedrock] service") + ); + } + ); + + service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of(), modelVerificationListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMap() throws IOException { + try (var service = createAmazonBedrockService()) { + var serviceSettings = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, null, null, null); + serviceSettings.put("extra_key", "value"); + + var config = getRequestConfigMap(serviceSettings, Map.of(), getAmazonBedrockSecretSettingsMap("access", "secret")); + + ActionListener modelVerificationListener = ActionListener.wrap((model) -> { + fail("Expected exception, but got model: " + model); + }, e -> { + assertThat(e, instanceOf(ElasticsearchStatusException.class)); + assertThat( + e.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [amazonbedrock] service") + ); + }); + + service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of(), modelVerificationListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "anthropic"); + var taskSettingsMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 
0.2, 128); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + taskSettingsMap.put("extra_key", "value"); + + var config = getRequestConfigMap(settingsMap, taskSettingsMap, secretSettingsMap); + + ActionListener modelVerificationListener = ActionListener.wrap((model) -> { + fail("Expected exception, but got model: " + model); + }, e -> { + assertThat(e, instanceOf(ElasticsearchStatusException.class)); + assertThat( + e.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [amazonbedrock] service") + ); + }); + + service.parseRequestConfig("id", TaskType.COMPLETION, config, Set.of(), modelVerificationListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "anthropic"); + var taskSettingsMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.2, 128); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + secretSettingsMap.put("extra_key", "value"); + + var config = getRequestConfigMap(settingsMap, taskSettingsMap, secretSettingsMap); + + ActionListener modelVerificationListener = ActionListener.wrap((model) -> { + fail("Expected exception, but got model: " + model); + }, e -> { + assertThat(e, instanceOf(ElasticsearchStatusException.class)); + assertThat( + e.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [amazonbedrock] service") + ); + }); + + service.parseRequestConfig("id", TaskType.COMPLETION, config, Set.of(), modelVerificationListener); + } + } + + public void testParseRequestConfig_MovesModel() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = ActionListener.wrap(model -> { + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + }, exception -> fail("Unexpected exception: " + exception)); + + service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, null, null, null), + Map.of(), + getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testCreateModel_ForEmbeddingsTask_DimensionsIsNotAllowed() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = ActionListener.wrap( + model -> fail("Expected exception, but got model: " + model), + exception -> { + assertThat(exception, instanceOf(ValidationException.class)); + assertThat(exception.getMessage(), containsString("[service_settings] does not allow the setting [dimensions]")); + } + ); + + service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", 512, null, null, null), + Map.of(), + 
getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testParsePersistedConfigWithSecrets_CreatesAnAmazonBedrockEmbeddingsModel() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfigWithSecrets_ThrowsErrorTryingToParseInvalidModel() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "amazontitan"); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, Map.of(), secretSettingsMap); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfigWithSecrets( + "id", + TaskType.SPARSE_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse stored model [id] for [amazonbedrock] service, please delete and add the service again") + ); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + persistedConfig.config().put("extra_key", "value"); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecretsSettings() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = 
createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + secretSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSecrets() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + persistedConfig.secrets().put("extra_key", "value"); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + settingsMap.put("extra_key", "value"); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInTaskSettings() throws 
IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "anthropic"); + var taskSettingsMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.2, 128); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + taskSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap(settingsMap, taskSettingsMap, secretSettingsMap); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.COMPLETION, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockChatCompletionModel.class)); + + var settings = (AmazonBedrockChatCompletionServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.ANTHROPIC)); + var taskSettings = (AmazonBedrockChatCompletionTaskSettings) model.getTaskSettings(); + assertThat(taskSettings.temperature(), is(1.0)); + assertThat(taskSettings.topP(), is(0.5)); + assertThat(taskSettings.topK(), is(0.2)); + assertThat(taskSettings.maxNewTokens(), is(128)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfig_CreatesAnAmazonBedrockEmbeddingsModel() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + assertNull(model.getSecretSettings()); + } + } + + public void testParsePersistedConfig_CreatesAnAmazonBedrockChatCompletionModel() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "anthropic"); + var taskSettingsMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.2, 128); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, taskSettingsMap, secretSettingsMap); + var model = service.parsePersistedConfig("id", TaskType.COMPLETION, persistedConfig.config()); + + assertThat(model, instanceOf(AmazonBedrockChatCompletionModel.class)); + + var settings = (AmazonBedrockChatCompletionServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.ANTHROPIC)); + var taskSettings = (AmazonBedrockChatCompletionTaskSettings) model.getTaskSettings(); + assertThat(taskSettings.temperature(), is(1.0)); + assertThat(taskSettings.topP(), is(0.5)); + assertThat(taskSettings.topK(), is(0.2)); + 
assertThat(taskSettings.maxNewTokens(), is(128)); + assertNull(model.getSecretSettings()); + } + } + + public void testParsePersistedConfig_ThrowsErrorTryingToParseInvalidModel() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.SPARSE_EMBEDDING, persistedConfig.config()) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse stored model [id] for [amazonbedrock] service, please delete and add the service again") + ); + } + } + + public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + persistedConfig.config().put("extra_key", "value"); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + assertNull(model.getSecretSettings()); + } + } + + public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + settingsMap.put("extra_key", "value"); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + persistedConfig.config().put("extra_key", "value"); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + assertNull(model.getSecretSettings()); + } + } + + public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "anthropic"); + var taskSettingsMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.2, 128); + taskSettingsMap.put("extra_key", "value"); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, taskSettingsMap, secretSettingsMap); + var model = service.parsePersistedConfig("id", TaskType.COMPLETION, 
persistedConfig.config()); + + assertThat(model, instanceOf(AmazonBedrockChatCompletionModel.class)); + + var settings = (AmazonBedrockChatCompletionServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.ANTHROPIC)); + var taskSettings = (AmazonBedrockChatCompletionTaskSettings) model.getTaskSettings(); + assertThat(taskSettings.temperature(), is(1.0)); + assertThat(taskSettings.topP(), is(0.5)); + assertThat(taskSettings.topK(), is(0.2)); + assertThat(taskSettings.maxNewTokens(), is(128)); + assertNull(model.getSecretSettings()); + } + } + + public void testInfer_ThrowsErrorWhenModelIsNotAmazonBedrockModel() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + var mockModel = getInvalidModel("model_id", "service_name"); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + mockModel, + null, + List.of(""), + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + thrownException.getMessage(), + is("The internal model was invalid, please delete the service [service_name] with id [model_id] and add it again.") + ); + + verify(factory, times(1)).createSender(); + verify(sender, times(1)).start(); + } + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public void testInfer_SendsRequest_ForEmbeddingsModel() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var results = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(results); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "access", + "secret" + ); + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + model, + null, + List.of("abc"), + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), Matchers.is(buildExpectationFloat(List.of(new float[] { 0.123F, 0.678F })))); + } + } + } + + public void testInfer_SendsRequest_ForChatCompletionModel() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + 
+ var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var mockResults = new ChatCompletionResults(List.of(new ChatCompletionResults.Result("test result"))); + requestSender.enqueue(mockResults); + + var model = AmazonBedrockChatCompletionModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "access", + "secret" + ); + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + model, + null, + List.of("abc"), + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), Matchers.is(buildExpectationCompletion(List.of("test result")))); + } + } + } + + public void testCheckModelConfig_IncludesMaxTokens_ForEmbeddingsModel() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var results = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(results); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + 100, + null, + null, + "access", + "secret" + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + service.checkModelConfig(model, listener); + var result = listener.actionGet(TIMEOUT); + assertThat( + result, + is( + AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 2, + false, + 100, + SimilarityMeasure.COSINE, + null, + "access", + "secret" + ) + ) + ); + var inputStrings = requestSender.getInputs(); + + MatcherAssert.assertThat(inputStrings, Matchers.is(List.of("how big"))); + } + } + } + + public void testCheckModelConfig_HasSimilarity_ForEmbeddingsModel() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var results = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(results); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + 
"model", + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + null, + SimilarityMeasure.COSINE, + null, + "access", + "secret" + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + service.checkModelConfig(model, listener); + var result = listener.actionGet(TIMEOUT); + assertThat( + result, + is( + AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 2, + false, + null, + SimilarityMeasure.COSINE, + null, + "access", + "secret" + ) + ) + ); + var inputStrings = requestSender.getInputs(); + + MatcherAssert.assertThat(inputStrings, Matchers.is(List.of("how big"))); + } + } + } + + public void testCheckModelConfig_ThrowsIfEmbeddingSizeDoesNotMatchValueSetByUser() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var results = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(results); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 3, + true, + null, + null, + null, + "access", + "secret" + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + service.checkModelConfig(model, listener); + + var exception = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + exception.getMessage(), + is( + "The retrieved embeddings size [2] does not match the size specified in the settings [3]. 
" + + "Please recreate the [id] configuration with the correct dimensions" + ) + ); + + var inputStrings = requestSender.getInputs(); + MatcherAssert.assertThat(inputStrings, Matchers.is(List.of("how big"))); + } + } + } + + public void testCheckModelConfig_ReturnsNewModelReference_AndDoesNotSendDimensionsField_WhenNotSetByUser() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var results = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(results); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 100, + false, + null, + SimilarityMeasure.COSINE, + null, + "access", + "secret" + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + service.checkModelConfig(model, listener); + var result = listener.actionGet(TIMEOUT); + assertThat( + result, + is( + AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 2, + false, + null, + SimilarityMeasure.COSINE, + null, + "access", + "secret" + ) + ) + ); + var inputStrings = requestSender.getInputs(); + + MatcherAssert.assertThat(inputStrings, Matchers.is(List.of("how big"))); + } + } + } + + public void testInfer_UnauthorizedResponse() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "us-east-1", + "amazon.titan-embed-text-v1", + AmazonBedrockProvider.AMAZONTITAN, + "_INVALID_AWS_ACCESS_KEY_", + "_INVALID_AWS_SECRET_KEY_" + ); + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + model, + null, + List.of("abc"), + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var exceptionThrown = assertThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(exceptionThrown.getCause().getMessage(), containsString("The security token included in the request is invalid")); + } + } + + public void testChunkedInfer_CallsInfer_ConvertsFloatResponse_ForEmbeddings() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) 
{ + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var mockResults = new InferenceTextEmbeddingFloatResults( + List.of( + new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F }), + new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.456F, 0.987F }) + ) + ); + requestSender.enqueue(mockResults); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "access", + "secret" + ); + PlainActionFuture> listener = new PlainActionFuture<>(); + service.chunkedInfer( + model, + List.of("abc", "xyz"), + new HashMap<>(), + InputType.INGEST, + new ChunkingOptions(null, null), + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var results = listener.actionGet(TIMEOUT); + assertThat(results, hasSize(2)); + { + assertThat(results.get(0), CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var floatResult = (InferenceChunkedTextEmbeddingFloatResults) results.get(0); + assertThat(floatResult.chunks(), hasSize(1)); + assertEquals("abc", floatResult.chunks().get(0).matchedText()); + assertArrayEquals(new float[] { 0.123F, 0.678F }, floatResult.chunks().get(0).embedding(), 0.0f); + } + { + assertThat(results.get(1), CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var floatResult = (InferenceChunkedTextEmbeddingFloatResults) results.get(1); + assertThat(floatResult.chunks(), hasSize(1)); + assertEquals("xyz", floatResult.chunks().get(0).matchedText()); + assertArrayEquals(new float[] { 0.456F, 0.987F }, floatResult.chunks().get(0).embedding(), 0.0f); + } + } + } + } + + private AmazonBedrockService createAmazonBedrockService() { + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + return new AmazonBedrockService(mock(HttpRequestSender.Factory.class), amazonBedrockFactory, createWithEmptySettings(threadPool)); + } + + private Map getRequestConfigMap( + Map serviceSettings, + Map taskSettings, + Map secretSettings + ) { + var builtServiceSettings = new HashMap<>(); + builtServiceSettings.putAll(serviceSettings); + builtServiceSettings.putAll(secretSettings); + + return new HashMap<>( + Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings) + ); + } + + private Utils.PersistedConfig getPersistedConfigMap( + Map serviceSettings, + Map taskSettings, + Map secretSettings + ) { + + return new Utils.PersistedConfig( + new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), + new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModelTests.java new file mode 100644 index 0000000000000..22173943ff432 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModelTests.java @@ -0,0 +1,221 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; + +public class AmazonBedrockChatCompletionModelTests extends ESTestCase { + public void testOverrideWith_OverridesWithoutValues() { + var model = createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 1.0, + 0.5, + 0.6, + 512, + null, + "access_key", + "secret_key" + ); + var requestTaskSettingsMap = getChatCompletionTaskSettingsMap(null, null, null, null); + var overriddenModel = AmazonBedrockChatCompletionModel.of(model, requestTaskSettingsMap); + + assertThat(overriddenModel, sameInstance(overriddenModel)); + } + + public void testOverrideWith_temperature() { + var model = createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 1.0, + null, + null, + null, + null, + "access_key", + "secret_key" + ); + var requestTaskSettings = getChatCompletionTaskSettingsMap(0.5, null, null, null); + var overriddenModel = AmazonBedrockChatCompletionModel.of(model, requestTaskSettings); + assertThat( + overriddenModel, + is( + createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 0.5, + null, + null, + null, + null, + "access_key", + "secret_key" + ) + ) + ); + } + + public void testOverrideWith_topP() { + var model = createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + 0.8, + null, + null, + null, + "access_key", + "secret_key" + ); + var requestTaskSettings = getChatCompletionTaskSettingsMap(null, 0.5, null, null); + var overriddenModel = AmazonBedrockChatCompletionModel.of(model, requestTaskSettings); + assertThat( + overriddenModel, + is( + createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + 0.5, + null, + null, + null, + "access_key", + "secret_key" + ) + ) + ); + } + + public void testOverrideWith_topK() { + var model = createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + 1.0, + null, + null, + "access_key", + "secret_key" + ); + var requestTaskSettings = getChatCompletionTaskSettingsMap(null, null, 0.8, null); + var overriddenModel = AmazonBedrockChatCompletionModel.of(model, requestTaskSettings); + assertThat( + overriddenModel, + is( + createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + 0.8, + null, + null, + "access_key", + "secret_key" + ) + ) + ); + } + + public void testOverrideWith_maxNewTokens() { + var model = createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + null, + 512, + null, + "access_key", + "secret_key" + ); + var 
requestTaskSettings = getChatCompletionTaskSettingsMap(null, null, null, 128); + var overriddenModel = AmazonBedrockChatCompletionModel.of(model, requestTaskSettings); + assertThat( + overriddenModel, + is( + createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + null, + 128, + null, + "access_key", + "secret_key" + ) + ) + ); + } + + public static AmazonBedrockChatCompletionModel createModel( + String id, + String region, + String model, + AmazonBedrockProvider provider, + String accessKey, + String secretKey + ) { + return createModel(id, region, model, provider, null, null, null, null, null, accessKey, secretKey); + } + + public static AmazonBedrockChatCompletionModel createModel( + String id, + String region, + String model, + AmazonBedrockProvider provider, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxNewTokens, + @Nullable RateLimitSettings rateLimitSettings, + String accessKey, + String secretKey + ) { + return new AmazonBedrockChatCompletionModel( + id, + TaskType.COMPLETION, + "amazonbedrock", + new AmazonBedrockChatCompletionServiceSettings(region, model, provider, rateLimitSettings), + new AmazonBedrockChatCompletionTaskSettings(temperature, topP, topK, maxNewTokens), + new AmazonBedrockSecretSettings(new SecureString(accessKey), new SecureString(secretKey)) + ); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettingsTests.java new file mode 100644 index 0000000000000..681088c786b6b --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettingsTests.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.MatcherAssert; + +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_NEW_TOKENS_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_P_FIELD; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockChatCompletionRequestTaskSettingsTests extends ESTestCase { + public void testFromMap_ReturnsEmptySettings_WhenTheMapIsEmpty() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of())); + assertThat(settings, is(AmazonBedrockChatCompletionRequestTaskSettings.EMPTY_SETTINGS)); + } + + public void testFromMap_ReturnsEmptySettings_WhenTheMapDoesNotContainTheFields() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of("key", "model"))); + assertThat(settings, is(AmazonBedrockChatCompletionRequestTaskSettings.EMPTY_SETTINGS)); + } + + public void testFromMap_ReturnsTemperature() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TEMPERATURE_FIELD, 0.1))); + assertThat(settings.temperature(), is(0.1)); + } + + public void testFromMap_ReturnsTopP() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TOP_P_FIELD, 0.1))); + assertThat(settings.topP(), is(0.1)); + } + + public void testFromMap_ReturnsDoSample() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TOP_K_FIELD, 0.3))); + assertThat(settings.topK(), is(0.3)); + } + + public void testFromMap_ReturnsMaxNewTokens() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(MAX_NEW_TOKENS_FIELD, 512))); + assertThat(settings.maxNewTokens(), is(512)); + } + + public void testFromMap_TemperatureIsInvalidValue_ThrowsValidationException() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TEMPERATURE_FIELD, "invalid"))) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("field [temperature] is not of the expected type. The value [invalid] cannot be converted to a [Double]") + ) + ); + } + + public void testFromMap_TopPIsInvalidValue_ThrowsValidationException() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TOP_P_FIELD, "invalid"))) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("field [top_p] is not of the expected type. 
The value [invalid] cannot be converted to a [Double]") + ) + ); + } + + public void testFromMap_TopKIsInvalidValue_ThrowsValidationException() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TOP_K_FIELD, "invalid"))) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString("field [top_k] is not of the expected type. The value [invalid] cannot be converted to a [Double]") + ); + } + + public void testFromMap_MaxTokensIsInvalidValue_ThrowsStatusException() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(MAX_NEW_TOKENS_FIELD, "invalid"))) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString("field [max_new_tokens] is not of the expected type. The value [invalid] cannot be converted to a [Integer]") + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettingsTests.java new file mode 100644 index 0000000000000..90868530d8df8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettingsTests.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; +import org.hamcrest.CoreMatchers; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MODEL_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.PROVIDER_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.REGION_FIELD; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockChatCompletionServiceSettingsTests extends AbstractBWCWireSerializationTestCase< + AmazonBedrockChatCompletionServiceSettings> { + + public void testFromMap_Request_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var serviceSettings = AmazonBedrockChatCompletionServiceSettings.fromMap( + createChatCompletionRequestSettingsMap(region, model, provider), + ConfigurationParseContext.REQUEST + ); + + assertThat( + serviceSettings, + is(new AmazonBedrockChatCompletionServiceSettings(region, model, AmazonBedrockProvider.AMAZONTITAN, null)) + ); + } + + public void testFromMap_RequestWithRateLimit_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var settingsMap = createChatCompletionRequestSettingsMap(region, model, provider); + settingsMap.put(RateLimitSettings.FIELD_NAME, new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, 3))); + + var serviceSettings = AmazonBedrockChatCompletionServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST); + + assertThat( + serviceSettings, + is(new AmazonBedrockChatCompletionServiceSettings(region, model, AmazonBedrockProvider.AMAZONTITAN, new RateLimitSettings(3))) + ); + } + + public void testFromMap_Persistent_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var settingsMap = createChatCompletionRequestSettingsMap(region, model, provider); + var serviceSettings = AmazonBedrockChatCompletionServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT); + + assertThat( + serviceSettings, + is(new AmazonBedrockChatCompletionServiceSettings(region, model, AmazonBedrockProvider.AMAZONTITAN, null)) + ); + } + + public void testToXContent_WritesAllValues() throws IOException { + var entity = new AmazonBedrockChatCompletionServiceSettings( + "testregion", + "testmodel", + AmazonBedrockProvider.AMAZONTITAN, + new RateLimitSettings(3) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, CoreMatchers.is(""" + 
{"region":"testregion","model":"testmodel","provider":"AMAZONTITAN",""" + """ + "rate_limit":{"requests_per_minute":3}}""")); + } + + public static HashMap createChatCompletionRequestSettingsMap(String region, String model, String provider) { + return new HashMap(Map.of(REGION_FIELD, region, MODEL_FIELD, model, PROVIDER_FIELD, provider)); + } + + @Override + protected AmazonBedrockChatCompletionServiceSettings mutateInstanceForVersion( + AmazonBedrockChatCompletionServiceSettings instance, + TransportVersion version + ) { + return instance; + } + + @Override + protected Writeable.Reader instanceReader() { + return AmazonBedrockChatCompletionServiceSettings::new; + } + + @Override + protected AmazonBedrockChatCompletionServiceSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AmazonBedrockChatCompletionServiceSettings mutateInstance(AmazonBedrockChatCompletionServiceSettings instance) + throws IOException { + return randomValueOtherThan(instance, AmazonBedrockChatCompletionServiceSettingsTests::createRandom); + } + + private static AmazonBedrockChatCompletionServiceSettings createRandom() { + return new AmazonBedrockChatCompletionServiceSettings( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomFrom(AmazonBedrockProvider.values()), + RateLimitSettingsTests.createRandom() + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettingsTests.java new file mode 100644 index 0000000000000..0d5440c6d2cf8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettingsTests.java @@ -0,0 +1,226 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_NEW_TOKENS_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_P_FIELD; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockChatCompletionTaskSettingsTests extends AbstractBWCWireSerializationTestCase< + AmazonBedrockChatCompletionTaskSettings> { + + public void testFromMap_AllValues() { + var taskMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512); + assertEquals( + new AmazonBedrockChatCompletionTaskSettings(1.0, 0.5, 0.6, 512), + AmazonBedrockChatCompletionTaskSettings.fromMap(taskMap) + ); + } + + public void testFromMap_TemperatureIsInvalidValue_ThrowsValidationException() { + var taskMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512); + taskMap.put(TEMPERATURE_FIELD, "invalid"); + + var thrownException = expectThrows(ValidationException.class, () -> AmazonBedrockChatCompletionTaskSettings.fromMap(taskMap)); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("field [temperature] is not of the expected type. The value [invalid] cannot be converted to a [Double]") + ) + ); + } + + public void testFromMap_TopPIsInvalidValue_ThrowsValidationException() { + var taskMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512); + taskMap.put(TOP_P_FIELD, "invalid"); + + var thrownException = expectThrows(ValidationException.class, () -> AmazonBedrockChatCompletionTaskSettings.fromMap(taskMap)); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("field [top_p] is not of the expected type. The value [invalid] cannot be converted to a [Double]") + ) + ); + } + + public void testFromMap_TopKIsInvalidValue_ThrowsValidationException() { + var taskMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512); + taskMap.put(TOP_K_FIELD, "invalid"); + + var thrownException = expectThrows(ValidationException.class, () -> AmazonBedrockChatCompletionTaskSettings.fromMap(taskMap)); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString("field [top_k] is not of the expected type. 
The value [invalid] cannot be converted to a [Double]") + ); + } + + public void testFromMap_MaxNewTokensIsInvalidValue_ThrowsValidationException() { + var taskMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512); + taskMap.put(MAX_NEW_TOKENS_FIELD, "invalid"); + + var thrownException = expectThrows(ValidationException.class, () -> AmazonBedrockChatCompletionTaskSettings.fromMap(taskMap)); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("field [max_new_tokens] is not of the expected type. The value [invalid] cannot be converted to a [Integer]") + ) + ); + } + + public void testFromMap_WithNoValues_DoesNotThrowException() { + var taskMap = AmazonBedrockChatCompletionTaskSettings.fromMap(new HashMap(Map.of())); + assertNull(taskMap.temperature()); + assertNull(taskMap.topP()); + assertNull(taskMap.topK()); + assertNull(taskMap.maxNewTokens()); + } + + public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + var overrideSettings = AmazonBedrockChatCompletionTaskSettings.of(settings, AmazonBedrockChatCompletionTaskSettings.EMPTY_SETTINGS); + MatcherAssert.assertThat(overrideSettings, is(settings)); + } + + public void testOverrideWith_UsesTemperatureOverride() { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + var overrideSettings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap( + getChatCompletionTaskSettingsMap(0.3, null, null, null) + ); + var overriddenTaskSettings = AmazonBedrockChatCompletionTaskSettings.of(settings, overrideSettings); + MatcherAssert.assertThat(overriddenTaskSettings, is(new AmazonBedrockChatCompletionTaskSettings(0.3, 0.5, 0.6, 512))); + } + + public void testOverrideWith_UsesTopPOverride() { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + var overrideSettings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap( + getChatCompletionTaskSettingsMap(null, 0.2, null, null) + ); + var overriddenTaskSettings = AmazonBedrockChatCompletionTaskSettings.of(settings, overrideSettings); + MatcherAssert.assertThat(overriddenTaskSettings, is(new AmazonBedrockChatCompletionTaskSettings(1.0, 0.2, 0.6, 512))); + } + + public void testOverrideWith_UsesDoSampleOverride() { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + var overrideSettings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap( + getChatCompletionTaskSettingsMap(null, null, 0.1, null) + ); + var overriddenTaskSettings = AmazonBedrockChatCompletionTaskSettings.of(settings, overrideSettings); + MatcherAssert.assertThat(overriddenTaskSettings, is(new AmazonBedrockChatCompletionTaskSettings(1.0, 0.5, 0.1, 512))); + } + + public void testOverrideWith_UsesMaxNewTokensOverride() { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + var overrideSettings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap( + getChatCompletionTaskSettingsMap(null, null, null, 128) + ); + var overriddenTaskSettings = AmazonBedrockChatCompletionTaskSettings.of(settings, overrideSettings); + MatcherAssert.assertThat(overriddenTaskSettings, is(new AmazonBedrockChatCompletionTaskSettings(1.0, 0.5, 0.6, 128))); + } + + public void 
testToXContent_WithoutParameters() throws IOException { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(null, null, null, null)); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + settings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is("{}")); + } + + public void testToXContent_WithParameters() throws IOException { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + settings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"temperature":1.0,"top_p":0.5,"top_k":0.6,"max_new_tokens":512}""")); + } + + public static Map getChatCompletionTaskSettingsMap( + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxNewTokens + ) { + var map = new HashMap(); + + if (temperature != null) { + map.put(TEMPERATURE_FIELD, temperature); + } + + if (topP != null) { + map.put(TOP_P_FIELD, topP); + } + + if (topK != null) { + map.put(TOP_K_FIELD, topK); + } + + if (maxNewTokens != null) { + map.put(MAX_NEW_TOKENS_FIELD, maxNewTokens); + } + + return map; + } + + @Override + protected AmazonBedrockChatCompletionTaskSettings mutateInstanceForVersion( + AmazonBedrockChatCompletionTaskSettings instance, + TransportVersion version + ) { + return instance; + } + + @Override + protected Writeable.Reader instanceReader() { + return AmazonBedrockChatCompletionTaskSettings::new; + } + + @Override + protected AmazonBedrockChatCompletionTaskSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AmazonBedrockChatCompletionTaskSettings mutateInstance(AmazonBedrockChatCompletionTaskSettings instance) throws IOException { + return randomValueOtherThan(instance, AmazonBedrockChatCompletionTaskSettingsTests::createRandom); + } + + private static AmazonBedrockChatCompletionTaskSettings createRandom() { + return new AmazonBedrockChatCompletionTaskSettings( + randomFrom(new Double[] { null, randomDouble() }), + randomFrom(new Double[] { null, randomDouble() }), + randomFrom(new Double[] { null, randomDouble() }), + randomFrom(new Integer[] { null, randomNonNegativeInt() }) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModelTests.java new file mode 100644 index 0000000000000..711e3cbb5a511 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModelTests.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.EmptyTaskSettings; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; + +public class AmazonBedrockEmbeddingsModelTests extends ESTestCase { + + public void testCreateModel_withTaskSettings_shouldFail() { + var baseModel = createModel("id", "region", "model", AmazonBedrockProvider.AMAZONTITAN, "accesskey", "secretkey"); + var thrownException = assertThrows( + ValidationException.class, + () -> AmazonBedrockEmbeddingsModel.of(baseModel, Map.of("testkey", "testvalue")) + ); + assertThat(thrownException.getMessage(), containsString("Amazon Bedrock embeddings model cannot have task settings")); + } + + // model creation only - no tests to define, but we want to have the public createModel + // method available + + public static AmazonBedrockEmbeddingsModel createModel( + String inferenceId, + String region, + String model, + AmazonBedrockProvider provider, + String accessKey, + String secretKey + ) { + return createModel(inferenceId, region, model, provider, null, false, null, null, new RateLimitSettings(240), accessKey, secretKey); + } + + public static AmazonBedrockEmbeddingsModel createModel( + String inferenceId, + String region, + String model, + AmazonBedrockProvider provider, + @Nullable Integer dimensions, + boolean dimensionsSetByUser, + @Nullable Integer maxTokens, + @Nullable SimilarityMeasure similarity, + RateLimitSettings rateLimitSettings, + String accessKey, + String secretKey + ) { + return new AmazonBedrockEmbeddingsModel( + inferenceId, + TaskType.TEXT_EMBEDDING, + "amazonbedrock", + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + provider, + dimensions, + dimensionsSetByUser, + maxTokens, + similarity, + rateLimitSettings + ), + new EmptyTaskSettings(), + new AmazonBedrockSecretSettings(new SecureString(accessKey), new SecureString(secretKey)) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettingsTests.java new file mode 100644 index 0000000000000..a100b89e1db6e --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettingsTests.java @@ -0,0 +1,404 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceFields; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; +import org.hamcrest.CoreMatchers; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.ServiceFields.DIMENSIONS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MODEL_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.PROVIDER_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.REGION_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings.DIMENSIONS_SET_BY_USER; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockEmbeddingsServiceSettingsTests extends AbstractBWCWireSerializationTestCase< + AmazonBedrockEmbeddingsServiceSettings> { + + public void testFromMap_Request_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var maxInputTokens = 512; + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap( + createEmbeddingsRequestSettingsMap(region, model, provider, null, null, maxInputTokens, SimilarityMeasure.COSINE), + ConfigurationParseContext.REQUEST + ); + + assertThat( + serviceSettings, + is( + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + maxInputTokens, + SimilarityMeasure.COSINE, + null + ) + ) + ); + } + + public void testFromMap_RequestWithRateLimit_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var maxInputTokens = 512; + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, null, maxInputTokens, SimilarityMeasure.COSINE); + settingsMap.put(RateLimitSettings.FIELD_NAME, new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, 3))); + + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST); + + assertThat( + serviceSettings, + is( + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + maxInputTokens, + SimilarityMeasure.COSINE, + new 
RateLimitSettings(3) + ) + ) + ); + } + + public void testFromMap_Request_DimensionsSetByUser_IsFalse_WhenDimensionsAreNotPresent() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var maxInputTokens = 512; + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, null, maxInputTokens, SimilarityMeasure.COSINE); + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST); + + assertThat( + serviceSettings, + is( + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + maxInputTokens, + SimilarityMeasure.COSINE, + null + ) + ) + ); + } + + public void testFromMap_Request_DimensionsSetByUser_ShouldThrowWhenPresent() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var maxInputTokens = 512; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, true, maxInputTokens, SimilarityMeasure.COSINE); + + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("Validation Failed: 1: [service_settings] does not allow the setting [%s];", DIMENSIONS_SET_BY_USER) + ) + ); + } + + public void testFromMap_Request_Dimensions_ShouldThrowWhenPresent() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var dims = 128; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, dims, null, null, null); + + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString(Strings.format("[service_settings] does not allow the setting [%s]", DIMENSIONS)) + ); + } + + public void testFromMap_Request_MaxTokensShouldBePositiveInteger() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var maxInputTokens = -128; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, null, maxInputTokens, null); + + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString(Strings.format("[%s] must be a positive integer", MAX_INPUT_TOKENS)) + ); + } + + public void testFromMap_Persistent_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var dims = 1536; + var maxInputTokens = 512; + + var settingsMap = createEmbeddingsRequestSettingsMap( + region, + model, + provider, + dims, + false, + maxInputTokens, + SimilarityMeasure.COSINE + ); + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT); + + assertThat( + serviceSettings, + is( + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + AmazonBedrockProvider.AMAZONTITAN, + dims, + false, + maxInputTokens, + SimilarityMeasure.COSINE, + null + ) + ) + ); + } + + public void testFromMap_PersistentContext_DoesNotThrowException_WhenDimensionsIsNull() { + var region = "region"; + var model = 
"model-id"; + var provider = "amazontitan"; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, true, null, null); + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT); + + assertThat( + serviceSettings, + is(new AmazonBedrockEmbeddingsServiceSettings(region, model, AmazonBedrockProvider.AMAZONTITAN, null, true, null, null, null)) + ); + } + + public void testFromMap_PersistentContext_DoesNotThrowException_WhenSimilarityIsPresent() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, true, null, SimilarityMeasure.DOT_PRODUCT); + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT); + + assertThat( + serviceSettings, + is( + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + AmazonBedrockProvider.AMAZONTITAN, + null, + true, + null, + SimilarityMeasure.DOT_PRODUCT, + null + ) + ) + ); + } + + public void testFromMap_PersistentContext_ThrowsException_WhenDimensionsSetByUserIsNull() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, 1, null, null, null); + + var exception = expectThrows( + ValidationException.class, + () -> AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT) + ); + + assertThat( + exception.getMessage(), + containsString("Validation Failed: 1: [service_settings] does not contain the required setting [dimensions_set_by_user];") + ); + } + + public void testToXContent_WritesDimensionsSetByUserTrue() throws IOException { + var entity = new AmazonBedrockEmbeddingsServiceSettings( + "testregion", + "testmodel", + AmazonBedrockProvider.AMAZONTITAN, + null, + true, + null, + null, + new RateLimitSettings(2) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, CoreMatchers.is(""" + {"region":"testregion","model":"testmodel","provider":"AMAZONTITAN",""" + """ + "rate_limit":{"requests_per_minute":2},"dimensions_set_by_user":true}""")); + } + + public void testToXContent_WritesAllValues() throws IOException { + var entity = new AmazonBedrockEmbeddingsServiceSettings( + "testregion", + "testmodel", + AmazonBedrockProvider.AMAZONTITAN, + 1024, + false, + 512, + null, + new RateLimitSettings(3) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, CoreMatchers.is(""" + {"region":"testregion","model":"testmodel","provider":"AMAZONTITAN",""" + """ + "rate_limit":{"requests_per_minute":3},"dimensions":1024,"max_input_tokens":512,"dimensions_set_by_user":false}""")); + } + + public void testToFilteredXContent_WritesAllValues_ExceptDimensionsSetByUser() throws IOException { + var entity = new AmazonBedrockEmbeddingsServiceSettings( + "testregion", + "testmodel", + AmazonBedrockProvider.AMAZONTITAN, + 1024, + false, + 512, + null, + new RateLimitSettings(3) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + var filteredXContent = entity.getFilteredXContentObject(); + 
filteredXContent.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, CoreMatchers.is(""" + {"region":"testregion","model":"testmodel","provider":"AMAZONTITAN",""" + """ + "rate_limit":{"requests_per_minute":3},"dimensions":1024,"max_input_tokens":512}""")); + } + + public static HashMap createEmbeddingsRequestSettingsMap( + String region, + String model, + String provider, + @Nullable Integer dimensions, + @Nullable Boolean dimensionsSetByUser, + @Nullable Integer maxTokens, + @Nullable SimilarityMeasure similarityMeasure + ) { + var map = new HashMap(Map.of(REGION_FIELD, region, MODEL_FIELD, model, PROVIDER_FIELD, provider)); + + if (dimensions != null) { + map.put(ServiceFields.DIMENSIONS, dimensions); + } + + if (dimensionsSetByUser != null) { + map.put(DIMENSIONS_SET_BY_USER, dimensionsSetByUser.equals(Boolean.TRUE)); + } + + if (maxTokens != null) { + map.put(ServiceFields.MAX_INPUT_TOKENS, maxTokens); + } + + if (similarityMeasure != null) { + map.put(SIMILARITY, similarityMeasure.toString()); + } + + return map; + } + + @Override + protected AmazonBedrockEmbeddingsServiceSettings mutateInstanceForVersion( + AmazonBedrockEmbeddingsServiceSettings instance, + TransportVersion version + ) { + return instance; + } + + @Override + protected Writeable.Reader instanceReader() { + return AmazonBedrockEmbeddingsServiceSettings::new; + } + + @Override + protected AmazonBedrockEmbeddingsServiceSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AmazonBedrockEmbeddingsServiceSettings mutateInstance(AmazonBedrockEmbeddingsServiceSettings instance) throws IOException { + return randomValueOtherThan(instance, AmazonBedrockEmbeddingsServiceSettingsTests::createRandom); + } + + private static AmazonBedrockEmbeddingsServiceSettings createRandom() { + return new AmazonBedrockEmbeddingsServiceSettings( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomFrom(AmazonBedrockProvider.values()), + randomFrom(new Integer[] { null, randomNonNegativeInt() }), + randomBoolean(), + randomFrom(new Integer[] { null, randomNonNegativeInt() }), + randomFrom(new SimilarityMeasure[] { null, randomFrom(SimilarityMeasure.values()) }), + RateLimitSettingsTests.createRandom() + ); + } +} From 6b185686a5f2c89468518bcd72f0cecbda86a718 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 5 Jul 2024 22:28:46 -0700 Subject: [PATCH 015/406] Introduce compute listener (#110400) (#110555) Currently, if a child request fails, we automatically trigger cancellation for ES|QL requests. This can result in TaskCancelledException being collected by the RefCountingListener first, which then returns that exception to the caller. For example, if we encounter a CircuitBreakingException (429), we might incorrectly return a TaskCancelledException (400) instead. This change introduces the ComputeListener, a variant of RefCountingListener, which selects the most appropriate exception to return to the caller. I also integrated the following features into ComputeListener to simplify ComputeService: - Automatic cancellation of sub-tasks on failure. - Collection of profiles from sub-tasks. - Collection of response headers from sub-tasks. 
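The exception-preference idea above can be sketched in a few lines of standalone Java. This is only an illustration of the selection rule, not the FailureCollector/ComputeListener code added below: the class name, the stand-in CancelledException, and the main method are assumptions made so the sketch runs outside Elasticsearch. Non-cancellation failures are preferred when building the final exception, with the remaining failures attached as suppressed, so the caller sees the root cause (for example a CircuitBreakingException) rather than the cancellation that followed it.

    import java.util.ArrayList;
    import java.util.List;

    final class PreferredFailure {
        // Stand-in for TaskCancelledException so the sketch has no Elasticsearch dependency.
        static final class CancelledException extends RuntimeException {
            CancelledException(String message) {
                super(message);
            }
        }

        private final List<Exception> cancelled = new ArrayList<>();
        private final List<Exception> nonCancelled = new ArrayList<>();

        // Sort incoming failures into cancellation and non-cancellation buckets.
        void collect(Exception e) {
            if (e instanceof CancelledException) {
                cancelled.add(e);
            } else {
                nonCancelled.add(e);
            }
        }

        // Prefer non-cancellation failures; attach the rest as suppressed exceptions.
        Exception failure() {
            List<Exception> preferred = nonCancelled.isEmpty() ? cancelled : nonCancelled;
            if (preferred.isEmpty()) {
                return null;
            }
            Exception first = preferred.get(0);
            for (int i = 1; i < preferred.size(); i++) {
                first.addSuppressed(preferred.get(i));
            }
            return first;
        }

        public static void main(String[] args) {
            PreferredFailure failures = new PreferredFailure();
            failures.collect(new CancelledException("task was cancelled"));
            failures.collect(new IllegalStateException("circuit breaker tripped"));
            // Prints the circuit-breaker failure, not the cancellation collected first.
            System.out.println(failures.failure());
        }
    }

The real FailureCollector in this patch additionally bounds how many exceptions it keeps per category (10 by default) and unwraps TransportException wrappers before categorizing.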
--- docs/changelog/110400.yaml | 5 + .../compute/operator/AsyncOperator.java | 31 +-- .../compute/operator/DriverRunner.java | 25 +- .../compute/operator/FailureCollector.java | 112 ++++++++ .../exchange/ExchangeSourceHandler.java | 33 +-- .../operator/FailureCollectorTests.java | 90 +++++++ .../xpack/esql/plugin/ComputeListener.java | 90 +++++++ .../xpack/esql/plugin/ComputeService.java | 254 +++++++----------- .../esql/plugin/ComputeListenerTests.java | 246 +++++++++++++++++ 9 files changed, 658 insertions(+), 228 deletions(-) create mode 100644 docs/changelog/110400.yaml create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java diff --git a/docs/changelog/110400.yaml b/docs/changelog/110400.yaml new file mode 100644 index 0000000000000..f2810eba214f1 --- /dev/null +++ b/docs/changelog/110400.yaml @@ -0,0 +1,5 @@ +pr: 110400 +summary: Introduce compute listener +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index 061cefc86bed0..0fed88370a144 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -21,13 +21,11 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Map; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.LongAdder; /** @@ -40,7 +38,7 @@ public abstract class AsyncOperator implements Operator { private volatile SubscribableListener blockedFuture; private final Map buffers = ConcurrentCollections.newConcurrentMap(); - private final AtomicReference failure = new AtomicReference<>(); + private final FailureCollector failureCollector = new FailureCollector(); private final DriverContext driverContext; private final int maxOutstandingRequests; @@ -77,7 +75,7 @@ public boolean needsInput() { @Override public void addInput(Page input) { - if (failure.get() != null) { + if (failureCollector.hasFailure()) { input.releaseBlocks(); return; } @@ -90,7 +88,7 @@ public void addInput(Page input) { onSeqNoCompleted(seqNo); }, e -> { releasePageOnAnyThread(input); - onFailure(e); + failureCollector.unwrapAndCollect(e); onSeqNoCompleted(seqNo); }); final long startNanos = System.nanoTime(); @@ -121,31 +119,12 @@ private void releasePageOnAnyThread(Page page) { protected abstract void doClose(); - private void onFailure(Exception e) { - failure.getAndUpdate(first -> { - if (first == null) { - return e; - } - // ignore subsequent TaskCancelledException exceptions as they don't provide useful info. 
- if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { - return first; - } - if (ExceptionsHelper.unwrap(first, TaskCancelledException.class) != null) { - return e; - } - if (ExceptionsHelper.unwrapCause(first) != ExceptionsHelper.unwrapCause(e)) { - first.addSuppressed(e); - } - return first; - }); - } - private void onSeqNoCompleted(long seqNo) { checkpoint.markSeqNoAsProcessed(seqNo); if (checkpoint.getPersistedCheckpoint() < checkpoint.getProcessedCheckpoint()) { notifyIfBlocked(); } - if (closed || failure.get() != null) { + if (closed || failureCollector.hasFailure()) { discardPages(); } } @@ -164,7 +143,7 @@ private void notifyIfBlocked() { } private void checkFailure() { - Exception e = failure.get(); + Exception e = failureCollector.getFailure(); if (e != null) { discardPages(); throw ExceptionsHelper.convertToElastic(e); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java index 5de017fbd279e..b427a36566f11 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java @@ -7,14 +7,11 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.tasks.TaskCancelledException; import java.util.List; -import java.util.concurrent.atomic.AtomicReference; /** * Run a set of drivers to completion. @@ -35,8 +32,8 @@ public DriverRunner(ThreadContext threadContext) { * Run all drivers to completion asynchronously. 
*/ public void runToCompletion(List drivers, ActionListener listener) { - AtomicReference failure = new AtomicReference<>(); var responseHeadersCollector = new ResponseHeadersCollector(threadContext); + var failure = new FailureCollector(); CountDown counter = new CountDown(drivers.size()); for (int i = 0; i < drivers.size(); i++) { Driver driver = drivers.get(i); @@ -48,23 +45,7 @@ public void onResponse(Void unused) { @Override public void onFailure(Exception e) { - failure.getAndUpdate(first -> { - if (first == null) { - return e; - } - if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { - return first; - } else { - if (ExceptionsHelper.unwrap(first, TaskCancelledException.class) != null) { - return e; - } else { - if (first != e) { - first.addSuppressed(e); - } - return first; - } - } - }); + failure.unwrapAndCollect(e); for (Driver d : drivers) { if (driver != d) { d.cancel("Driver [" + driver.sessionId() + "] was cancelled or failed"); @@ -77,7 +58,7 @@ private void done() { responseHeadersCollector.collect(); if (counter.countDown()) { responseHeadersCollector.finish(); - Exception error = failure.get(); + Exception error = failure.getFailure(); if (error != null) { listener.onFailure(error); } else { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java new file mode 100644 index 0000000000000..99edab038af31 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.transport.TransportException; + +import java.util.List; +import java.util.Queue; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * {@code FailureCollector} is responsible for collecting exceptions that occur in the compute engine. + * The collected exceptions are categorized into task-cancelled and non-task-cancelled exceptions. + * To limit memory usage, this class collects only the first 10 exceptions in each category by default. + * When returning the accumulated failure to the caller, this class prefers non-task-cancelled exceptions + * over task-cancelled ones as they are more useful for diagnosing issues. 
+ */ +public final class FailureCollector { + private final Queue cancelledExceptions = ConcurrentCollections.newQueue(); + private final AtomicInteger cancelledExceptionsCount = new AtomicInteger(); + + private final Queue nonCancelledExceptions = ConcurrentCollections.newQueue(); + private final AtomicInteger nonCancelledExceptionsCount = new AtomicInteger(); + + private final int maxExceptions; + private volatile boolean hasFailure = false; + private Exception finalFailure = null; + + public FailureCollector() { + this(10); + } + + public FailureCollector(int maxExceptions) { + if (maxExceptions <= 0) { + throw new IllegalArgumentException("maxExceptions must be at least one"); + } + this.maxExceptions = maxExceptions; + } + + public void unwrapAndCollect(Exception originEx) { + final Exception e = originEx instanceof TransportException + ? (originEx.getCause() instanceof Exception cause ? cause : new ElasticsearchException(originEx.getCause())) + : originEx; + if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { + if (cancelledExceptionsCount.incrementAndGet() <= maxExceptions) { + cancelledExceptions.add(e); + } + } else { + if (nonCancelledExceptionsCount.incrementAndGet() <= maxExceptions) { + nonCancelledExceptions.add(e); + } + } + hasFailure = true; + } + + /** + * @return {@code true} if any failure has been collected, {@code false} otherwise + */ + public boolean hasFailure() { + return hasFailure; + } + + /** + * Returns the accumulated failure, preferring non-task-cancelled exceptions over task-cancelled ones. + * Once this method builds the failure, incoming failures are discarded. + * + * @return the accumulated failure, or {@code null} if no failure has been collected + */ + public Exception getFailure() { + if (hasFailure == false) { + return null; + } + synchronized (this) { + if (finalFailure == null) { + finalFailure = buildFailure(); + } + return finalFailure; + } + } + + private Exception buildFailure() { + assert hasFailure; + assert Thread.holdsLock(this); + int total = 0; + Exception first = null; + for (var exceptions : List.of(nonCancelledExceptions, cancelledExceptions)) { + for (Exception e : exceptions) { + if (first == null) { + first = e; + total++; + } else if (first != e) { + first.addSuppressed(e); + total++; + } + if (total >= maxExceptions) { + return first; + } + } + } + assert first != null; + return first; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java index adce8d8a88407..77b535949eb9d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java @@ -7,21 +7,18 @@ package org.elasticsearch.compute.operator.exchange; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.FailureCollector; import org.elasticsearch.core.Releasable; -import org.elasticsearch.tasks.TaskCancelledException; -import 
org.elasticsearch.transport.TransportException; import java.util.List; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; /** * An {@link ExchangeSourceHandler} asynchronously fetches pages and status from multiple {@link RemoteSink}s @@ -37,7 +34,7 @@ public final class ExchangeSourceHandler { private final PendingInstances outstandingSinks; private final PendingInstances outstandingSources; - private final AtomicReference failure = new AtomicReference<>(); + private final FailureCollector failure = new FailureCollector(); public ExchangeSourceHandler(int maxBufferSize, Executor fetchExecutor) { this.buffer = new ExchangeBuffer(maxBufferSize); @@ -54,7 +51,7 @@ private class ExchangeSourceImpl implements ExchangeSource { } private void checkFailure() { - Exception e = failure.get(); + Exception e = failure.getFailure(); if (e != null) { throw ExceptionsHelper.convertToElastic(e); } @@ -172,7 +169,7 @@ void fetchPage() { while (loopControl.isRunning()) { loopControl.exiting(); // finish other sinks if one of them failed or source no longer need pages. - boolean toFinishSinks = buffer.noMoreInputs() || failure.get() != null; + boolean toFinishSinks = buffer.noMoreInputs() || failure.hasFailure(); remoteSink.fetchPageAsync(toFinishSinks, ActionListener.wrap(resp -> { Page page = resp.takePage(); if (page != null) { @@ -199,26 +196,8 @@ void fetchPage() { loopControl.exited(); } - void onSinkFailed(Exception originEx) { - final Exception e = originEx instanceof TransportException - ? (originEx.getCause() instanceof Exception cause ? cause : new ElasticsearchException(originEx.getCause())) - : originEx; - failure.getAndUpdate(first -> { - if (first == null) { - return e; - } - // ignore subsequent TaskCancelledException exceptions as they don't provide useful info. - if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { - return first; - } - if (ExceptionsHelper.unwrap(first, TaskCancelledException.class) != null) { - return e; - } - if (ExceptionsHelper.unwrapCause(first) != ExceptionsHelper.unwrapCause(e)) { - first.addSuppressed(e); - } - return first; - }); + void onSinkFailed(Exception e) { + failure.unwrapAndCollect(e); buffer.waitForReading().onResponse(null); // resume the Driver if it is being blocked on reading onSinkComplete(); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java new file mode 100644 index 0000000000000..d5fa0a1eaecc9 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.RemoteTransportException; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.lessThan; + +public class FailureCollectorTests extends ESTestCase { + + public void testCollect() throws Exception { + int maxExceptions = between(1, 100); + FailureCollector collector = new FailureCollector(maxExceptions); + List cancelledExceptions = List.of( + new TaskCancelledException("user request"), + new TaskCancelledException("cross "), + new TaskCancelledException("on failure") + ); + List nonCancelledExceptions = List.of( + new IOException("i/o simulated"), + new IOException("disk broken"), + new CircuitBreakingException("low memory", CircuitBreaker.Durability.TRANSIENT), + new CircuitBreakingException("over limit", CircuitBreaker.Durability.TRANSIENT) + ); + List failures = Stream.concat( + IntStream.range(0, between(1, 500)).mapToObj(n -> randomFrom(cancelledExceptions)), + IntStream.range(0, between(1, 500)).mapToObj(n -> randomFrom(nonCancelledExceptions)) + ).collect(Collectors.toList()); + Randomness.shuffle(failures); + Queue queue = new ConcurrentLinkedQueue<>(failures); + Thread[] threads = new Thread[between(1, 4)]; + CyclicBarrier carrier = new CyclicBarrier(threads.length); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + try { + carrier.await(10, TimeUnit.SECONDS); + } catch (Exception e) { + throw new AssertionError(e); + } + Exception ex; + while ((ex = queue.poll()) != null) { + if (randomBoolean()) { + collector.unwrapAndCollect(ex); + } else { + collector.unwrapAndCollect(new RemoteTransportException("disconnect", ex)); + } + if (randomBoolean()) { + assertTrue(collector.hasFailure()); + } + } + }); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + assertTrue(collector.hasFailure()); + Exception failure = collector.getFailure(); + assertNotNull(failure); + assertThat(failure, Matchers.in(nonCancelledExceptions)); + assertThat(failure.getSuppressed().length, lessThan(maxExceptions)); + } + + public void testEmpty() { + FailureCollector collector = new FailureCollector(5); + assertFalse(collector.hasFailure()); + assertNull(collector.getFailure()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java new file mode 100644 index 0000000000000..f8f35bb6f0b4f --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.FailureCollector; +import org.elasticsearch.compute.operator.ResponseHeadersCollector; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A variant of {@link RefCountingListener} with the following differences: + * 1. Automatically cancels sub tasks on failure. + * 2. Collects driver profiles from sub tasks. + * 3. Collects response headers from sub tasks, specifically warnings emitted during compute + * 4. Collects failures and returns the most appropriate exception to the caller. + */ +final class ComputeListener implements Releasable { + private static final Logger LOGGER = LogManager.getLogger(ComputeService.class); + + private final RefCountingListener refs; + private final FailureCollector failureCollector = new FailureCollector(); + private final AtomicBoolean cancelled = new AtomicBoolean(); + private final CancellableTask task; + private final TransportService transportService; + private final List collectedProfiles; + private final ResponseHeadersCollector responseHeaders; + + ComputeListener(TransportService transportService, CancellableTask task, ActionListener delegate) { + this.transportService = transportService; + this.task = task; + this.responseHeaders = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext()); + this.collectedProfiles = Collections.synchronizedList(new ArrayList<>()); + this.refs = new RefCountingListener(1, ActionListener.wrap(ignored -> { + responseHeaders.finish(); + var result = new ComputeResponse(collectedProfiles.isEmpty() ? List.of() : collectedProfiles.stream().toList()); + delegate.onResponse(result); + }, e -> delegate.onFailure(failureCollector.getFailure()))); + } + + /** + * Acquires a new listener that doesn't collect result + */ + ActionListener acquireAvoid() { + return refs.acquire().delegateResponse((l, e) -> { + failureCollector.unwrapAndCollect(e); + try { + if (cancelled.compareAndSet(false, true)) { + LOGGER.debug("cancelling ESQL task {} on failure", task); + transportService.getTaskManager().cancelTaskAndDescendants(task, "cancelled on failure", false, ActionListener.noop()); + } + } finally { + l.onFailure(e); + } + }); + } + + /** + * Acquires a new listener that collects compute result. 
This listener will also collects warnings emitted during compute + */ + ActionListener acquireCompute() { + return acquireAvoid().map(resp -> { + responseHeaders.collect(); + if (resp != null && resp.getProfiles().isEmpty() == false) { + collectedProfiles.addAll(resp.getProfiles()); + } + return null; + }); + } + + @Override + public void close() { + refs.close(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 4ebc4af258134..b26fbc7ac4804 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -29,7 +29,6 @@ import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.compute.operator.DriverTaskRunner; -import org.elasticsearch.compute.operator.ResponseHeadersCollector; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.compute.operator.exchange.ExchangeSink; import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler; @@ -81,7 +80,6 @@ import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Supplier; import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME; @@ -172,13 +170,16 @@ public void execute( null, null ); - runCompute( - rootTask, - computeContext, - coordinatorPlan, - listener.map(driverProfiles -> new Result(collectedPages, driverProfiles)) - ); - return; + try ( + var computeListener = new ComputeListener( + transportService, + rootTask, + listener.map(r -> new Result(collectedPages, r.getProfiles())) + ) + ) { + runCompute(rootTask, computeContext, coordinatorPlan, computeListener.acquireCompute()); + return; + } } else { if (clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0)) { var error = "expected concrete indices with data node plan but got empty; data node plan " + dataNodePlan; @@ -191,31 +192,25 @@ public void execute( .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); var localOriginalIndices = clusterToOriginalIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); var localConcreteIndices = clusterToConcreteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - final var responseHeadersCollector = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext()); - listener = ActionListener.runBefore(listener, responseHeadersCollector::finish); - final AtomicBoolean cancelled = new AtomicBoolean(); - final List collectedProfiles = configuration.profile() ? 
Collections.synchronizedList(new ArrayList<>()) : List.of(); final var exchangeSource = new ExchangeSourceHandler( queryPragmas.exchangeBufferSize(), transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); try ( Releasable ignored = exchangeSource.addEmptySink(); - RefCountingListener refs = new RefCountingListener(listener.map(unused -> new Result(collectedPages, collectedProfiles))) + var computeListener = new ComputeListener( + transportService, + rootTask, + listener.map(r -> new Result(collectedPages, r.getProfiles())) + ) ) { // run compute on the coordinator - exchangeSource.addCompletionListener(refs.acquire()); + exchangeSource.addCompletionListener(computeListener.acquireAvoid()); runCompute( rootTask, new ComputeContext(sessionId, RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, List.of(), configuration, exchangeSource, null), coordinatorPlan, - cancelOnFailure(rootTask, cancelled, refs.acquire()).map(driverProfiles -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(driverProfiles); - } - return null; - }) + computeListener.acquireCompute() ); // starts computes on data nodes on the main cluster if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) { @@ -228,17 +223,10 @@ public void execute( Set.of(localConcreteIndices.indices()), localOriginalIndices.indices(), exchangeSource, - ActionListener.releaseAfter(refs.acquire(), exchangeSource.addEmptySink()), - () -> cancelOnFailure(rootTask, cancelled, refs.acquire()).map(response -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(response.getProfiles()); - } - return null; - }) + computeListener ); } - // starts computes on remote cluster + // starts computes on remote clusters startComputeOnRemoteClusters( sessionId, rootTask, @@ -246,13 +234,7 @@ public void execute( dataNodePlan, exchangeSource, getRemoteClusters(clusterToConcreteIndices, clusterToOriginalIndices), - () -> cancelOnFailure(rootTask, cancelled, refs.acquire()).map(response -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(response.getProfiles()); - } - return null; - }) + computeListener ); } } @@ -288,8 +270,7 @@ private void startComputeOnDataNodes( Set concreteIndices, String[] originalIndices, ExchangeSourceHandler exchangeSource, - ActionListener parentListener, - Supplier> dataNodeListenerSupplier + ComputeListener computeListener ) { var planWithReducer = configuration.pragmas().nodeLevelReduction() == false ? dataNodePlan @@ -303,12 +284,12 @@ private void startComputeOnDataNodes( // Since it's used only for @timestamp, it is relatively safe to assume it's not needed // but it would be better to have a proper impl. QueryBuilder requestFilter = PlannerUtils.requestFilter(planWithReducer, x -> true); + var lookupListener = ActionListener.releaseAfter(computeListener.acquireAvoid(), exchangeSource.addEmptySink()); lookupDataNodes(parentTask, clusterAlias, requestFilter, concreteIndices, originalIndices, ActionListener.wrap(dataNodes -> { - try (RefCountingRunnable refs = new RefCountingRunnable(() -> parentListener.onResponse(null))) { + try (RefCountingListener refs = new RefCountingListener(lookupListener)) { // For each target node, first open a remote exchange on the remote node, then link the exchange source to // the new remote exchange sink, and initialize the computation on the target node via data-node-request. 
for (DataNode node : dataNodes) { - var dataNodeListener = ActionListener.releaseAfter(dataNodeListenerSupplier.get(), refs.acquire()); var queryPragmas = configuration.pragmas(); ExchangeService.openExchange( transportService, @@ -316,9 +297,10 @@ private void startComputeOnDataNodes( sessionId, queryPragmas.exchangeBufferSize(), esqlExecutor, - dataNodeListener.delegateFailureAndWrap((delegate, unused) -> { + refs.acquire().delegateFailureAndWrap((l, unused) -> { var remoteSink = exchangeService.newRemoteSink(parentTask, sessionId, transportService, node.connection); exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); + var dataNodeListener = ActionListener.runBefore(computeListener.acquireCompute(), () -> l.onResponse(null)); transportService.sendChildRequest( node.connection, DATA_ACTION_NAME, @@ -332,13 +314,13 @@ private void startComputeOnDataNodes( ), parentTask, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(delegate, ComputeResponse::new, esqlExecutor) + new ActionListenerResponseHandler<>(dataNodeListener, ComputeResponse::new, esqlExecutor) ); }) ); } } - }, parentListener::onFailure)); + }, lookupListener::onFailure)); } private void startComputeOnRemoteClusters( @@ -348,19 +330,19 @@ private void startComputeOnRemoteClusters( PhysicalPlan plan, ExchangeSourceHandler exchangeSource, List clusters, - Supplier> listener + ComputeListener computeListener ) { - try (RefCountingRunnable refs = new RefCountingRunnable(exchangeSource.addEmptySink()::close)) { + var queryPragmas = configuration.pragmas(); + var linkExchangeListeners = ActionListener.releaseAfter(computeListener.acquireAvoid(), exchangeSource.addEmptySink()); + try (RefCountingListener refs = new RefCountingListener(linkExchangeListeners)) { for (RemoteCluster cluster : clusters) { - var targetNodeListener = ActionListener.releaseAfter(listener.get(), refs.acquire()); - var queryPragmas = configuration.pragmas(); ExchangeService.openExchange( transportService, cluster.connection, sessionId, queryPragmas.exchangeBufferSize(), esqlExecutor, - targetNodeListener.delegateFailureAndWrap((l, unused) -> { + refs.acquire().delegateFailureAndWrap((l, unused) -> { var remoteSink = exchangeService.newRemoteSink(rootTask, sessionId, transportService, cluster.connection); exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); var clusterRequest = new ClusterComputeRequest( @@ -371,13 +353,14 @@ private void startComputeOnRemoteClusters( cluster.concreteIndices, cluster.originalIndices ); + var clusterListener = ActionListener.runBefore(computeListener.acquireCompute(), () -> l.onResponse(null)); transportService.sendChildRequest( cluster.connection, CLUSTER_ACTION_NAME, clusterRequest, rootTask, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(l, ComputeResponse::new, esqlExecutor) + new ActionListenerResponseHandler<>(clusterListener, ComputeResponse::new, esqlExecutor) ); }) ); @@ -385,17 +368,7 @@ private void startComputeOnRemoteClusters( } } - private ActionListener cancelOnFailure(CancellableTask task, AtomicBoolean cancelled, ActionListener listener) { - return listener.delegateResponse((l, e) -> { - l.onFailure(e); - if (cancelled.compareAndSet(false, true)) { - LOGGER.debug("cancelling ESQL task {} on failure", task); - transportService.getTaskManager().cancelTaskAndDescendants(task, "cancelled", false, ActionListener.noop()); - } - }); - } - - void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan 
plan, ActionListener> listener) { + void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, ActionListener listener) { listener = ActionListener.runBefore(listener, () -> Releasables.close(context.searchContexts)); List contexts = new ArrayList<>(context.searchContexts.size()); for (int i = 0; i < context.searchContexts.size(); i++) { @@ -445,9 +418,10 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, } ActionListener listenerCollectingStatus = listener.map(ignored -> { if (context.configuration.profile()) { - return drivers.stream().map(Driver::profile).toList(); + return new ComputeResponse(drivers.stream().map(Driver::profile).toList()); + } else { + return new ComputeResponse(List.of()); } - return null; }); listenerCollectingStatus = ActionListener.releaseAfter(listenerCollectingStatus, () -> Releasables.close(drivers)); driverRunner.executeDrivers( @@ -612,8 +586,7 @@ private class DataNodeRequestExecutor { private final DataNodeRequest request; private final CancellableTask parentTask; private final ExchangeSinkHandler exchangeSink; - private final ActionListener listener; - private final List driverProfiles; + private final ComputeListener computeListener; private final int maxConcurrentShards; private final ExchangeSink blockingSink; // block until we have completed on all shards or the coordinator has enough data @@ -622,14 +595,12 @@ private class DataNodeRequestExecutor { CancellableTask parentTask, ExchangeSinkHandler exchangeSink, int maxConcurrentShards, - List driverProfiles, - ActionListener listener + ComputeListener computeListener ) { this.request = request; this.parentTask = parentTask; this.exchangeSink = exchangeSink; - this.listener = listener; - this.driverProfiles = driverProfiles; + this.computeListener = computeListener; this.maxConcurrentShards = maxConcurrentShards; this.blockingSink = exchangeSink.createExchangeSink(); } @@ -647,40 +618,46 @@ private void runBatch(int startBatchIndex) { final var sessionId = request.sessionId(); final int endBatchIndex = Math.min(startBatchIndex + maxConcurrentShards, request.shardIds().size()); List shardIds = request.shardIds().subList(startBatchIndex, endBatchIndex); + ActionListener batchListener = new ActionListener<>() { + final ActionListener ref = computeListener.acquireCompute(); + + @Override + public void onResponse(ComputeResponse result) { + try { + onBatchCompleted(endBatchIndex); + } finally { + ref.onResponse(result); + } + } + + @Override + public void onFailure(Exception e) { + try { + exchangeService.finishSinkHandler(request.sessionId(), e); + } finally { + ref.onFailure(e); + } + } + }; acquireSearchContexts(clusterAlias, shardIds, configuration, request.aliasFilters(), ActionListener.wrap(searchContexts -> { assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH, ESQL_WORKER_THREAD_POOL_NAME); var computeContext = new ComputeContext(sessionId, clusterAlias, searchContexts, configuration, null, exchangeSink); - runCompute( - parentTask, - computeContext, - request.plan(), - ActionListener.wrap(profiles -> onBatchCompleted(endBatchIndex, profiles), this::onFailure) - ); - }, this::onFailure)); + runCompute(parentTask, computeContext, request.plan(), batchListener); + }, batchListener::onFailure)); } - private void onBatchCompleted(int lastBatchIndex, List batchProfiles) { - if (request.configuration().profile()) { - driverProfiles.addAll(batchProfiles); - } + private void onBatchCompleted(int lastBatchIndex) { if (lastBatchIndex < 
request.shardIds().size() && exchangeSink.isFinished() == false) { runBatch(lastBatchIndex); } else { - blockingSink.finish(); // don't return until all pages are fetched + var completionListener = computeListener.acquireAvoid(); exchangeSink.addCompletionListener( - ContextPreservingActionListener.wrapPreservingContext( - ActionListener.runBefore(listener, () -> exchangeService.finishSinkHandler(request.sessionId(), null)), - transportService.getThreadPool().getThreadContext() - ) + ActionListener.runAfter(completionListener, () -> exchangeService.finishSinkHandler(request.sessionId(), null)) ); + blockingSink.finish(); } } - - private void onFailure(Exception e) { - exchangeService.finishSinkHandler(request.sessionId(), e); - listener.onFailure(e); - } } private void runComputeOnDataNode( @@ -688,17 +665,10 @@ private void runComputeOnDataNode( String externalId, PhysicalPlan reducePlan, DataNodeRequest request, - ActionListener listener + ComputeListener computeListener ) { - final List collectedProfiles = request.configuration().profile() - ? Collections.synchronizedList(new ArrayList<>()) - : List.of(); - final var responseHeadersCollector = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext()); - final RefCountingListener listenerRefs = new RefCountingListener( - ActionListener.runBefore(listener.map(unused -> new ComputeResponse(collectedProfiles)), responseHeadersCollector::finish) - ); + var parentListener = computeListener.acquireAvoid(); try { - final AtomicBoolean cancelled = new AtomicBoolean(); // run compute with target shards var internalSink = exchangeService.createSinkHandler(request.sessionId(), request.pragmas().exchangeBufferSize()); DataNodeRequestExecutor dataNodeRequestExecutor = new DataNodeRequestExecutor( @@ -706,17 +676,16 @@ private void runComputeOnDataNode( task, internalSink, request.configuration().pragmas().maxConcurrentShardsPerNode(), - collectedProfiles, - ActionListener.runBefore(cancelOnFailure(task, cancelled, listenerRefs.acquire()), responseHeadersCollector::collect) + computeListener ); dataNodeRequestExecutor.start(); // run the node-level reduction var externalSink = exchangeService.getSinkHandler(externalId); task.addListener(() -> exchangeService.finishSinkHandler(externalId, new TaskCancelledException(task.getReasonCancelled()))); var exchangeSource = new ExchangeSourceHandler(1, esqlExecutor); - exchangeSource.addCompletionListener(listenerRefs.acquire()); + exchangeSource.addCompletionListener(computeListener.acquireAvoid()); exchangeSource.addRemoteSink(internalSink::fetchPageAsync, 1); - ActionListener reductionListener = cancelOnFailure(task, cancelled, listenerRefs.acquire()); + ActionListener reductionListener = computeListener.acquireCompute(); runCompute( task, new ComputeContext( @@ -728,26 +697,22 @@ private void runComputeOnDataNode( externalSink ), reducePlan, - ActionListener.wrap(driverProfiles -> { - responseHeadersCollector.collect(); - if (request.configuration().profile()) { - collectedProfiles.addAll(driverProfiles); - } + ActionListener.wrap(resp -> { // don't return until all pages are fetched - externalSink.addCompletionListener( - ActionListener.runBefore(reductionListener, () -> exchangeService.finishSinkHandler(externalId, null)) - ); + externalSink.addCompletionListener(ActionListener.running(() -> { + exchangeService.finishSinkHandler(externalId, null); + reductionListener.onResponse(resp); + })); }, e -> { exchangeService.finishSinkHandler(externalId, e); reductionListener.onFailure(e); 
}) ); + parentListener.onResponse(null); } catch (Exception e) { exchangeService.finishSinkHandler(externalId, e); exchangeService.finishSinkHandler(request.sessionId(), e); - listenerRefs.acquire().onFailure(e); - } finally { - listenerRefs.close(); + parentListener.onFailure(e); } } @@ -784,7 +749,9 @@ public void messageReceived(DataNodeRequest request, TransportChannel channel, T request.aliasFilters(), request.plan() ); - runComputeOnDataNode((CancellableTask) task, sessionId, reducePlan, request, listener); + try (var computeListener = new ComputeListener(transportService, (CancellableTask) task, listener)) { + runComputeOnDataNode((CancellableTask) task, sessionId, reducePlan, request, computeListener); + } } } @@ -798,16 +765,18 @@ public void messageReceived(ClusterComputeRequest request, TransportChannel chan listener.onFailure(new IllegalStateException("expected exchange sink for a remote compute; got " + request.plan())); return; } - runComputeOnRemoteCluster( - request.clusterAlias(), - request.sessionId(), - (CancellableTask) task, - request.configuration(), - (ExchangeSinkExec) request.plan(), - Set.of(request.indices()), - request.originalIndices(), - listener - ); + try (var computeListener = new ComputeListener(transportService, (CancellableTask) task, listener)) { + runComputeOnRemoteCluster( + request.clusterAlias(), + request.sessionId(), + (CancellableTask) task, + request.configuration(), + (ExchangeSinkExec) request.plan(), + Set.of(request.indices()), + request.originalIndices(), + computeListener + ); + } } } @@ -828,28 +797,20 @@ void runComputeOnRemoteCluster( ExchangeSinkExec plan, Set concreteIndices, String[] originalIndices, - ActionListener listener + ComputeListener computeListener ) { final var exchangeSink = exchangeService.getSinkHandler(globalSessionId); parentTask.addListener( () -> exchangeService.finishSinkHandler(globalSessionId, new TaskCancelledException(parentTask.getReasonCancelled())) ); - ThreadPool threadPool = transportService.getThreadPool(); - final var responseHeadersCollector = new ResponseHeadersCollector(threadPool.getThreadContext()); - listener = ActionListener.runBefore(listener, responseHeadersCollector::finish); - final AtomicBoolean cancelled = new AtomicBoolean(); - final List collectedProfiles = configuration.profile() ? 
Collections.synchronizedList(new ArrayList<>()) : List.of(); final String localSessionId = clusterAlias + ":" + globalSessionId; var exchangeSource = new ExchangeSourceHandler( configuration.pragmas().exchangeBufferSize(), transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); - try ( - Releasable ignored = exchangeSource.addEmptySink(); - RefCountingListener refs = new RefCountingListener(listener.map(unused -> new ComputeResponse(collectedProfiles))) - ) { - exchangeSink.addCompletionListener(refs.acquire()); - exchangeSource.addCompletionListener(refs.acquire()); + try (Releasable ignored = exchangeSource.addEmptySink()) { + exchangeSink.addCompletionListener(computeListener.acquireAvoid()); + exchangeSource.addCompletionListener(computeListener.acquireAvoid()); PhysicalPlan coordinatorPlan = new ExchangeSinkExec( plan.source(), plan.output(), @@ -860,13 +821,7 @@ void runComputeOnRemoteCluster( parentTask, new ComputeContext(localSessionId, clusterAlias, List.of(), configuration, exchangeSource, exchangeSink), coordinatorPlan, - cancelOnFailure(parentTask, cancelled, refs.acquire()).map(driverProfiles -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(driverProfiles); - } - return null; - }) + computeListener.acquireCompute() ); startComputeOnDataNodes( localSessionId, @@ -877,14 +832,7 @@ void runComputeOnRemoteCluster( concreteIndices, originalIndices, exchangeSource, - ActionListener.releaseAfter(refs.acquire(), exchangeSource.addEmptySink()), - () -> cancelOnFailure(parentTask, cancelled, refs.acquire()).map(r -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(r.getProfiles()); - } - return null; - }) + computeListener ); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java new file mode 100644 index 0000000000000..c93f3b9e0e350 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.node.VersionInformation; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.TaskCancellationService; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TransportVersionUtils; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.tasks.MockTaskManager.SPY_TASK_MANAGER_SETTING; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; + +public class ComputeListenerTests extends ESTestCase { + private ThreadPool threadPool; + private TransportService transportService; + + @Before + public void setUpTransportService() { + threadPool = new TestThreadPool(getTestName()); + transportService = MockTransportService.createNewService( + Settings.builder().put(SPY_TASK_MANAGER_SETTING.getKey(), true).build(), + VersionInformation.CURRENT, + TransportVersionUtils.randomVersion(), + threadPool + ); + transportService.start(); + TaskCancellationService cancellationService = new TaskCancellationService(transportService); + transportService.getTaskManager().setTaskCancellationService(cancellationService); + Mockito.clearInvocations(transportService.getTaskManager()); + } + + @After + public void shutdownTransportService() { + transportService.close(); + terminate(threadPool); + } + + private CancellableTask newTask() { + return new CancellableTask( + randomIntBetween(1, 100), + "test-type", + "test-action", + "test-description", + TaskId.EMPTY_TASK_ID, + Map.of() + ); + } + + private ComputeResponse randomResponse() { + int numProfiles = randomIntBetween(0, 2); + List profiles = new ArrayList<>(numProfiles); + for (int i = 0; i < numProfiles; i++) { + profiles.add(new DriverProfile(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), List.of())); + } + return new ComputeResponse(profiles); + } + + public void testEmpty() { + PlainActionFuture results = new PlainActionFuture<>(); + try (ComputeListener ignored = new ComputeListener(transportService, newTask(), results)) { + 
assertFalse(results.isDone()); + } + assertTrue(results.isDone()); + assertThat(results.actionGet(10, TimeUnit.SECONDS).getProfiles(), empty()); + } + + public void testCollectComputeResults() { + PlainActionFuture future = new PlainActionFuture<>(); + List allProfiles = new ArrayList<>(); + try (ComputeListener computeListener = new ComputeListener(transportService, newTask(), future)) { + int tasks = randomIntBetween(1, 100); + for (int t = 0; t < tasks; t++) { + if (randomBoolean()) { + ActionListener subListener = computeListener.acquireAvoid(); + threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(null)), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } else { + ComputeResponse resp = randomResponse(); + allProfiles.addAll(resp.getProfiles()); + ActionListener subListener = computeListener.acquireCompute(); + threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(resp)), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } + } + } + ComputeResponse result = future.actionGet(10, TimeUnit.SECONDS); + assertThat( + result.getProfiles().stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum)), + equalTo(allProfiles.stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum))) + ); + Mockito.verifyNoInteractions(transportService.getTaskManager()); + } + + public void testCancelOnFailure() throws Exception { + Queue rootCauseExceptions = ConcurrentCollections.newQueue(); + IntStream.range(0, between(1, 100)) + .forEach( + n -> rootCauseExceptions.add(new CircuitBreakingException("breaking exception " + n, CircuitBreaker.Durability.TRANSIENT)) + ); + int successTasks = between(1, 50); + int failedTasks = between(1, 100); + PlainActionFuture rootListener = new PlainActionFuture<>(); + CancellableTask rootTask = newTask(); + try (ComputeListener computeListener = new ComputeListener(transportService, rootTask, rootListener)) { + for (int i = 0; i < successTasks; i++) { + ActionListener subListener = computeListener.acquireCompute(); + threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(randomResponse())), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } + for (int i = 0; i < failedTasks; i++) { + ActionListener subListener = randomBoolean() ? 
computeListener.acquireAvoid() : computeListener.acquireCompute(); + threadPool.schedule(ActionRunnable.wrap(subListener, l -> { + Exception ex = rootCauseExceptions.poll(); + if (ex == null) { + ex = new TaskCancelledException("task was cancelled"); + } + l.onFailure(ex); + }), TimeValue.timeValueNanos(between(0, 100)), threadPool.generic()); + } + } + assertBusy(rootListener::isDone); + ExecutionException failure = expectThrows(ExecutionException.class, () -> rootListener.get(1, TimeUnit.SECONDS)); + Throwable cause = failure.getCause(); + assertNotNull(failure); + assertThat(cause, instanceOf(CircuitBreakingException.class)); + assertThat(failure.getSuppressed().length, lessThan(10)); + Mockito.verify(transportService.getTaskManager(), Mockito.times(1)) + .cancelTaskAndDescendants(eq(rootTask), eq("cancelled on failure"), eq(false), any()); + } + + public void testCollectWarnings() throws Exception { + List allProfiles = new ArrayList<>(); + Map> allWarnings = new HashMap<>(); + ActionListener rootListener = new ActionListener<>() { + @Override + public void onResponse(ComputeResponse result) { + assertThat( + result.getProfiles().stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum)), + equalTo(allProfiles.stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum))) + ); + Map> responseHeaders = threadPool.getThreadContext() + .getResponseHeaders() + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new HashSet<>(e.getValue()))); + assertThat(responseHeaders, equalTo(allWarnings)); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }; + CountDownLatch latch = new CountDownLatch(1); + try ( + ComputeListener computeListener = new ComputeListener( + transportService, + newTask(), + ActionListener.runAfter(rootListener, latch::countDown) + ) + ) { + int tasks = randomIntBetween(1, 100); + for (int t = 0; t < tasks; t++) { + if (randomBoolean()) { + ActionListener subListener = computeListener.acquireAvoid(); + threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(null)), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } else { + ComputeResponse resp = randomResponse(); + allProfiles.addAll(resp.getProfiles()); + int numWarnings = randomIntBetween(1, 5); + Map warnings = new HashMap<>(); + for (int i = 0; i < numWarnings; i++) { + warnings.put("key" + between(1, 10), "value" + between(1, 10)); + } + for (Map.Entry e : warnings.entrySet()) { + allWarnings.computeIfAbsent(e.getKey(), v -> new HashSet<>()).add(e.getValue()); + } + ActionListener subListener = computeListener.acquireCompute(); + threadPool.schedule(ActionRunnable.wrap(subListener, l -> { + for (Map.Entry e : warnings.entrySet()) { + threadPool.getThreadContext().addResponseHeader(e.getKey(), e.getValue()); + } + l.onResponse(resp); + }), TimeValue.timeValueNanos(between(0, 100)), threadPool.generic()); + } + } + } + assertTrue(latch.await(10, TimeUnit.SECONDS)); + Mockito.verifyNoInteractions(transportService.getTaskManager()); + } +} From 088fabe32d1baacd70e5a0b7e675045a67fd870a Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 7 Jul 2024 09:14:39 -0700 Subject: [PATCH 016/406] Adjust cancellation message in task tests (#110546) (#110567) Adding `parent task was cancelled [test cancel]` to the list of allowed cancellation messages. 
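The change is easiest to see as a matcher pattern: rather than chaining either(...).or(...) once per allowed message, the allowed values go into a single collection checked with Hamcrest's in(...), so permitting another cancellation message only means extending the list. The sketch below is illustrative, not the real EsqlActionTaskIT test: it assumes Hamcrest is on the classpath, and the wrapper class and method names are made up; the message strings are the ones used by the patch.

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.in;

    import java.util.List;

    // Illustrative wrapper (not the real test): passes if the observed cancellation
    // message is any one of the allowed values, including the newly added one.
    class CancellationMessageCheck {
        static void check(String actualMessage) {
            assertThat(
                actualMessage,
                in(List.of(
                    "test cancel",
                    "task cancelled",
                    "request cancelled test cancel",
                    "parent task was cancelled [test cancel]"))
            );
        }

        public static void main(String[] args) {
            check("parent task was cancelled [test cancel]"); // accepted after this change
        }
    }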
--- .../org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index 9778756176574..cde4f10ef556c 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -59,6 +59,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -325,7 +326,7 @@ private void assertCancelled(ActionFuture response) throws Ex */ assertThat( cancelException.getMessage(), - either(equalTo("test cancel")).or(equalTo("task cancelled")).or(equalTo("request cancelled test cancel")) + in(List.of("test cancel", "task cancelled", "request cancelled test cancel", "parent task was cancelled [test cancel]")) ); assertBusy( () -> assertThat( From 7b51ff6ba319fc7426f7c188bbe35ed692ed10eb Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 8 Jul 2024 08:16:14 +0100 Subject: [PATCH 017/406] AwaitsFix for #110551 --- .../elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java index 5adc0b090ed37..6a4e973d8fcc5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java @@ -48,6 +48,7 @@ public static void removeDisruptFSyncFS() { PathUtilsForTesting.teardown(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/110551") public void testFsyncFailureDoesNotAdvanceLocalCheckpoints() { String indexName = randomIdentifier(); client().admin() From 9cbb8bee01b949d4615b873ea1a98cf8c308f184 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Mon, 8 Jul 2024 09:38:54 +0100 Subject: [PATCH 018/406] Add known-issues for all affected releases for the feature upgrade issue (#110523) (#110575) --- docs/reference/release-notes/8.13.0.asciidoc | 7 +++++++ docs/reference/release-notes/8.13.1.asciidoc | 10 ++++++++++ docs/reference/release-notes/8.13.2.asciidoc | 10 ++++++++++ docs/reference/release-notes/8.13.3.asciidoc | 10 ++++++++++ docs/reference/release-notes/8.13.4.asciidoc | 10 ++++++++++ docs/reference/release-notes/8.14.0.asciidoc | 10 ++++++++++ docs/reference/release-notes/8.14.1.asciidoc | 10 ++++++++++ docs/reference/release-notes/8.14.2.asciidoc | 12 +++++++++++- 8 files changed, 78 insertions(+), 1 deletion(-) diff --git a/docs/reference/release-notes/8.13.0.asciidoc b/docs/reference/release-notes/8.13.0.asciidoc index dba4fdbe5f67e..4bb2913f07be7 100644 --- a/docs/reference/release-notes/8.13.0.asciidoc +++ b/docs/reference/release-notes/8.13.0.asciidoc @@ -21,6 +21,13 @@ This affects clusters running version 8.10 or later, with an active downsampling 
https://www.elastic.co/guide/en/elasticsearch/reference/current/downsampling-ilm.html[configuration] or a configuration that was activated at some point since upgrading to version 8.10 or later. +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + [[breaking-8.13.0]] [float] === Breaking changes diff --git a/docs/reference/release-notes/8.13.1.asciidoc b/docs/reference/release-notes/8.13.1.asciidoc index 7b3dbff74cc6e..572f9fe1172a9 100644 --- a/docs/reference/release-notes/8.13.1.asciidoc +++ b/docs/reference/release-notes/8.13.1.asciidoc @@ -3,6 +3,16 @@ Also see <>. +[[known-issues-8.13.1]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + [[bug-8.13.1]] [float] diff --git a/docs/reference/release-notes/8.13.2.asciidoc b/docs/reference/release-notes/8.13.2.asciidoc index 514118f5ea575..20ae7abbb5769 100644 --- a/docs/reference/release-notes/8.13.2.asciidoc +++ b/docs/reference/release-notes/8.13.2.asciidoc @@ -3,6 +3,16 @@ Also see <>. +[[known-issues-8.13.2]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + [[bug-8.13.2]] [float] diff --git a/docs/reference/release-notes/8.13.3.asciidoc b/docs/reference/release-notes/8.13.3.asciidoc index 9aee0dd815f6d..ea51bd6f9b743 100644 --- a/docs/reference/release-notes/8.13.3.asciidoc +++ b/docs/reference/release-notes/8.13.3.asciidoc @@ -10,6 +10,16 @@ Also see <>. SQL:: * Limit how much space some string functions can use {es-pull}107333[#107333] +[[known-issues-8.13.3]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. 
+To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + [[bug-8.13.3]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.13.4.asciidoc b/docs/reference/release-notes/8.13.4.asciidoc index bf3f2f497d8fc..b60c9f485bb31 100644 --- a/docs/reference/release-notes/8.13.4.asciidoc +++ b/docs/reference/release-notes/8.13.4.asciidoc @@ -3,6 +3,16 @@ Also see <>. +[[known-issues-8.13.4]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + [[bug-8.13.4]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.14.0.asciidoc b/docs/reference/release-notes/8.14.0.asciidoc index 42f2f86a123ed..5b92c49ced70a 100644 --- a/docs/reference/release-notes/8.14.0.asciidoc +++ b/docs/reference/release-notes/8.14.0.asciidoc @@ -12,6 +12,16 @@ Security:: * Apply stricter Document Level Security (DLS) rules for the validate query API with the rewrite parameter {es-pull}105709[#105709] * Apply stricter Document Level Security (DLS) rules for terms aggregations when min_doc_count is set to 0 {es-pull}105714[#105714] +[[known-issues-8.14.0]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + [[bug-8.14.0]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.14.1.asciidoc b/docs/reference/release-notes/8.14.1.asciidoc index f161c7d08099c..1cab442eb9ac1 100644 --- a/docs/reference/release-notes/8.14.1.asciidoc +++ b/docs/reference/release-notes/8.14.1.asciidoc @@ -4,6 +4,16 @@ Also see <>. +[[known-issues-8.14.1]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + [[bug-8.14.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.14.2.asciidoc b/docs/reference/release-notes/8.14.2.asciidoc index 2bb374451b2ac..9273355106a03 100644 --- a/docs/reference/release-notes/8.14.2.asciidoc +++ b/docs/reference/release-notes/8.14.2.asciidoc @@ -5,6 +5,16 @@ coming[8.14.2] Also see <>. 
+[[known-issues-8.14.2]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + [[bug-8.14.2]] [float] === Bug fixes @@ -35,4 +45,4 @@ Ranking:: Search:: * Add hexstring support byte painless scorers {es-pull}109492[#109492] -* Fix automatic tracking of collapse with `docvalue_fields` {es-pull}110103[#110103] \ No newline at end of file +* Fix automatic tracking of collapse with `docvalue_fields` {es-pull}110103[#110103] From e961f0b0030655ac1f9adf13c2e2428c20fdffdc Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 8 Jul 2024 10:49:06 -0400 Subject: [PATCH 019/406] A small tidiness refactor of the GeoIpTaskState's Metadata (#110553) (#110588) --- .../ingest/geoip/GeoIpDownloaderIT.java | 2 +- .../ingest/geoip/GeoIpDownloader.java | 19 ++++++++++++------- .../ingest/geoip/GeoIpTaskState.java | 16 +++++++--------- .../ingest/geoip/GeoIpDownloaderTests.java | 8 ++++---- 4 files changed, 24 insertions(+), 21 deletions(-) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index 9dcd8abc7bc57..9eab00fbadf20 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -242,7 +242,7 @@ public void testGeoIpDatabasesDownload() throws Exception { Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"), state.getDatabases().keySet() ); - GeoIpTaskState.Metadata metadata = state.get(id); + GeoIpTaskState.Metadata metadata = state.getDatabases().get(id); int size = metadata.lastChunk() - metadata.firstChunk() + 1; assertResponse( prepareSearch(GeoIpDownloader.DATABASES_INDEX).setSize(size) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 895c9315d2325..5239e96856b7f 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -170,23 +170,28 @@ private List fetchDatabasesOverview() throws IOException { } // visible for testing - void processDatabase(Map databaseInfo) { + void processDatabase(final Map databaseInfo) { String name = databaseInfo.get("name").toString().replace(".tgz", "") + ".mmdb"; String md5 = (String) databaseInfo.get("md5_hash"); - if (state.contains(name) && Objects.equals(md5, state.get(name).md5())) { - updateTimestamp(name, state.get(name)); - return; - } - logger.debug("downloading geoip database [{}]", name); String url = databaseInfo.get("url").toString(); if (url.startsWith("http") == false) { // relative url, add it after last slash (i.e. 
resolve sibling) or at the end if there's no slash after http[s]:// int lastSlash = endpoint.substring(8).lastIndexOf('/'); url = (lastSlash != -1 ? endpoint.substring(0, lastSlash + 8) : endpoint) + "/" + url; } + processDatabase(name, md5, url); + } + + private void processDatabase(final String name, final String md5, final String url) { + Metadata metadata = state.getDatabases().getOrDefault(name, Metadata.EMPTY); + if (Objects.equals(metadata.md5(), md5)) { + updateTimestamp(name, metadata); + return; + } + logger.debug("downloading geoip database [{}]", name); long start = System.currentTimeMillis(); try (InputStream is = httpClient.get(url)) { - int firstChunk = state.contains(name) ? state.get(name).lastChunk() + 1 : 0; + int firstChunk = metadata.lastChunk() + 1; // if there is no metadata, then Metadata.EMPTY.lastChunk() + 1 = 0 int lastChunk = indexChunks(name, is, firstChunk, md5, start); if (lastChunk > firstChunk) { state = state.put(name, new Metadata(start, firstChunk, lastChunk - 1, md5, start)); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java index d55f517b46e24..a405d90b24dcc 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java @@ -84,14 +84,6 @@ public Map getDatabases() { return databases; } - public boolean contains(String name) { - return databases.containsKey(name); - } - - public Metadata get(String name) { - return databases.get(name); - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -142,7 +134,13 @@ public void writeTo(StreamOutput out) throws IOException { record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long lastCheck) implements ToXContentObject { - static final String NAME = GEOIP_DOWNLOADER + "-metadata"; + /** + * An empty Metadata object useful for getOrDefault -type calls. Crucially, the 'lastChunk' is -1, so it's safe to use + * with logic that says the new firstChunk is the old lastChunk + 1. 
+ */ + static Metadata EMPTY = new Metadata(-1, -1, -1, "", -1); + + private static final String NAME = GEOIP_DOWNLOADER + "-metadata"; private static final ParseField LAST_CHECK = new ParseField("last_check"); private static final ParseField LAST_UPDATE = new ParseField("last_update"); private static final ParseField FIRST_CHUNK = new ParseField("first_chunk"); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 9cc5405c1b617..4834c581e9386 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -290,8 +290,8 @@ int indexChunks(String name, InputStream is, int chunk, String expectedMd5, long @Override void updateTaskState() { - assertEquals(0, state.get("test.mmdb").firstChunk()); - assertEquals(10, state.get("test.mmdb").lastChunk()); + assertEquals(0, state.getDatabases().get("test.mmdb").firstChunk()); + assertEquals(10, state.getDatabases().get("test.mmdb").lastChunk()); } @Override @@ -341,8 +341,8 @@ int indexChunks(String name, InputStream is, int chunk, String expectedMd5, long @Override void updateTaskState() { - assertEquals(9, state.get("test.mmdb").firstChunk()); - assertEquals(10, state.get("test.mmdb").lastChunk()); + assertEquals(9, state.getDatabases().get("test.mmdb").firstChunk()); + assertEquals(10, state.getDatabases().get("test.mmdb").lastChunk()); } @Override From d0859d59022d69ea9a9c5a533e33cf64430dec66 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 8 Jul 2024 17:20:26 -0400 Subject: [PATCH 020/406] Fix ExactKnnQueryBuilderTests testToQuery (#110357) (#110589) (#110608) closes https://github.com/elastic/elasticsearch/issues/110357 With the loosening of what is considered a unit vector, we need to ensure we only normalize for equality checking if the query vector is indeed not a unit vector. 
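Concretely, judging from the EPS check visible in the diff: DenseVectorFieldMapper treats any vector whose magnitude is within 1e-3 of 1.0 as already normalized and leaves it untouched, so a query vector with magnitude 1.0005 passes through unchanged while the old test code still l2-normalized its expected copy, leaving a tiny mismatch for assertArrayEquals with a 0.0f delta. The test now mirrors that guard; simplified sketch (index-version check omitted, identifiers as in the diff):

    float magnitude = VectorUtil.dotProduct(expected, expected);   // squared L2 norm of the expected vector
    if (DenseVectorFieldMapper.isNotUnitVector(magnitude)) {       // only normalize when production would too
        VectorUtil.l2normalize(expected);
    }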
(cherry picked from commit fd790ff351f43523e6c05621b5d1be7fe30f141c) --- muted-tests.yml | 3 --- .../index/mapper/vectors/DenseVectorFieldMapper.java | 2 +- .../search/vectors/ExactKnnQueryBuilderTests.java | 5 ++++- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 990b7d5dc5130..d46a9355c201f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -88,9 +88,6 @@ tests: - class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT method: testMinVersionAsOldVersion issue: https://github.com/elastic/elasticsearch/issues/109454 -- class: org.elasticsearch.search.vectors.ExactKnnQueryBuilderTests - method: testToQuery - issue: https://github.com/elastic/elasticsearch/issues/110357 - class: org.elasticsearch.search.aggregations.bucket.terms.RareTermsIT method: testSingleValuedString issue: https://github.com/elastic/elasticsearch/issues/110388 diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 989c92e909ce2..d27c0acdb6b2e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -98,7 +98,7 @@ public class DenseVectorFieldMapper extends FieldMapper { public static final String COSINE_MAGNITUDE_FIELD_SUFFIX = "._magnitude"; private static final float EPS = 1e-3f; - static boolean isNotUnitVector(float magnitude) { + public static boolean isNotUnitVector(float magnitude) { return Math.abs(magnitude - 1.0f) > EPS; } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java index 1e77e35b60a4c..627f8a184a147 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.AbstractQueryTestCase; @@ -87,7 +88,9 @@ protected void doAssertLuceneQuery(ExactKnnQueryBuilder queryBuilder, Query quer DenseVectorQuery.Floats denseVectorQuery = (DenseVectorQuery.Floats) query; assertEquals(VECTOR_FIELD, denseVectorQuery.field); float[] expected = Arrays.copyOf(queryBuilder.getQuery().asFloatVector(), queryBuilder.getQuery().asFloatVector().length); - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.NORMALIZED_VECTOR_COSINE)) { + float magnitude = VectorUtil.dotProduct(expected, expected); + if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.NORMALIZED_VECTOR_COSINE) + && DenseVectorFieldMapper.isNotUnitVector(magnitude)) { VectorUtil.l2normalize(expected); assertArrayEquals(expected, denseVectorQuery.getQuery(), 0.0f); } else { From 41c662d0d12c7e90ec7c237b7b9d24fd6436c557 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 8 Jul 2024 18:07:13 -0700 Subject: [PATCH 021/406] Fix BWC for compute listener (#110615) (#110617) ComputeResponse from old nodes may have a null value instead of 
an empty list for profiles. Relates #110400 Closes #110591 --- .../org/elasticsearch/xpack/esql/plugin/ComputeListener.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java index f8f35bb6f0b4f..01d50d505f7f2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java @@ -76,8 +76,9 @@ ActionListener acquireAvoid() { ActionListener acquireCompute() { return acquireAvoid().map(resp -> { responseHeaders.collect(); - if (resp != null && resp.getProfiles().isEmpty() == false) { - collectedProfiles.addAll(resp.getProfiles()); + var profiles = resp.getProfiles(); + if (profiles != null && profiles.isEmpty() == false) { + collectedProfiles.addAll(profiles); } return null; }); From c23f1244e0b669d49b7ced53050dc89215b4480f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 9 Jul 2024 00:17:12 -0700 Subject: [PATCH 022/406] Fix translate metrics without rate (#110614) (#110621) Currently, we incorrectly remove the `@timestamp` attribute from the EsRelation when translating metric aggregates. --- .../rules/TranslateMetricsAggregate.java | 2 +- .../optimizer/LogicalPlanOptimizerTests.java | 50 +++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java index 1e222823ebf2b..ca08dd57189cc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java @@ -218,7 +218,7 @@ private static Aggregate toStandardAggregate(Aggregate metrics) { final LogicalPlan child = metrics.child().transformDown(EsRelation.class, r -> { var attributes = new ArrayList<>(new AttributeSet(metrics.inputSet())); attributes.removeIf(a -> a.name().equals(MetadataAttribute.TSID_FIELD)); - if (attributes.stream().noneMatch(a -> a.name().equals(MetadataAttribute.TIMESTAMP_FIELD)) == false) { + if (attributes.stream().noneMatch(a -> a.name().equals(MetadataAttribute.TIMESTAMP_FIELD))) { attributes.removeIf(a -> a.name().equals(MetadataAttribute.TIMESTAMP_FIELD)); } return new EsRelation(r.source(), r.index(), new ArrayList<>(attributes), IndexMode.STANDARD); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 6a9e7a4000734..de5d734c559d3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -5477,6 +5477,56 @@ METRICS k8s avg(round(1.05 * rate(network.total_bytes_in))) BY bucket(@timestamp assertThat(Expressions.attribute(values.field()).name(), equalTo("cluster")); } + public void testMetricsWithoutRate() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + List queries = List.of(""" + METRICS k8s 
count(to_long(network.total_bytes_in)) BY bucket(@timestamp, 1 minute) + | LIMIT 10 + """, """ + METRICS k8s | STATS count(to_long(network.total_bytes_in)) BY bucket(@timestamp, 1 minute) + | LIMIT 10 + """, """ + FROM k8s | STATS count(to_long(network.total_bytes_in)) BY bucket(@timestamp, 1 minute) + | LIMIT 10 + """); + List plans = new ArrayList<>(); + for (String query : queries) { + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + plans.add(plan); + } + for (LogicalPlan plan : plans) { + Limit limit = as(plan, Limit.class); + Aggregate aggregate = as(limit.child(), Aggregate.class); + assertThat(aggregate.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + assertThat(aggregate.aggregates(), hasSize(2)); + assertThat(aggregate.groupings(), hasSize(1)); + Eval eval = as(aggregate.child(), Eval.class); + assertThat(eval.fields(), hasSize(2)); + assertThat(Alias.unwrap(eval.fields().get(0)), instanceOf(Bucket.class)); + assertThat(Alias.unwrap(eval.fields().get(1)), instanceOf(ToLong.class)); + EsRelation relation = as(eval.child(), EsRelation.class); + assertThat(relation.indexMode(), equalTo(IndexMode.STANDARD)); + } + for (int i = 1; i < plans.size(); i++) { + assertThat(plans.get(i), equalTo(plans.get(0))); + } + } + + public void testRateInStats() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = """ + METRICS k8s | STATS max(rate(network.total_bytes_in)) BY bucket(@timestamp, 1 minute) + | LIMIT 10 + """; + VerificationException error = expectThrows( + VerificationException.class, + () -> logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))) + ); + assertThat(error.getMessage(), equalTo(""" + Found 1 problem + line 1:25: the rate aggregate[rate(network.total_bytes_in)] can only be used within the metrics command""")); + } + private Literal nullOf(DataType dataType) { return new Literal(Source.EMPTY, null, dataType); } From ecf77bfee9f076e5010c005cde0c58e74b583264 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Tue, 9 Jul 2024 09:36:34 +0200 Subject: [PATCH 023/406] [CI] Temporally increase disk space for DRA build jobs (#110601) (#110622) (cherry picked from commit fbcde9c0fd40c3f461af0cefe1af6eabe9da5091) --- .buildkite/pipelines/dra-workflow.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pipelines/dra-workflow.yml b/.buildkite/pipelines/dra-workflow.yml index 32a2b7d22134a..bcc6c9c57d756 100644 --- a/.buildkite/pipelines/dra-workflow.yml +++ b/.buildkite/pipelines/dra-workflow.yml @@ -7,7 +7,7 @@ steps: image: family/elasticsearch-ubuntu-2204 machineType: custom-32-98304 buildDirectory: /dev/shm/bk - diskSizeGb: 250 + diskSizeGb: 350 - wait # The hadoop build depends on the ES artifact # So let's trigger the hadoop build any time we build a new staging artifact From 0f98f30e761b60647c913564898b17b40e8f21a0 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 9 Jul 2024 10:30:00 +0100 Subject: [PATCH 024/406] [ML] Fixes processing chunked results in AWS Bedrock service (#110592) (#110624) Fixes error using the Amazon Bedrock service with a large input that was chunked. 
--- .../amazonbedrock/AmazonBedrockService.java | 24 +------------------ .../azureopenai/AzureOpenAiService.java | 18 -------------- .../AmazonBedrockServiceTests.java | 21 +++++++++------- 3 files changed, 14 insertions(+), 49 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java index dadcc8a40245e..459ca367058f8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java @@ -23,10 +23,6 @@ import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; -import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.amazonbedrock.AmazonBedrockActionCreator; import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockRequestSender; @@ -47,7 +43,6 @@ import java.util.Set; import static org.elasticsearch.TransportVersions.ML_INFERENCE_AMAZON_BEDROCK_ADDED; -import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; @@ -115,10 +110,6 @@ protected void doChunkedInfer( TimeValue timeout, ActionListener> listener ) { - ActionListener inferListener = listener.delegateFailureAndWrap( - (delegate, response) -> delegate.onResponse(translateToChunkedResults(input, response)) - ); - var actionCreator = new AmazonBedrockActionCreator(amazonBedrockSender, this.getServiceComponents(), timeout); if (model instanceof AmazonBedrockModel baseAmazonBedrockModel) { var maxBatchSize = getEmbeddingsMaxBatchSize(baseAmazonBedrockModel.provider()); @@ -126,26 +117,13 @@ protected void doChunkedInfer( .batchRequestsWithListeners(listener); for (var request : batchedRequests) { var action = baseAmazonBedrockModel.accept(actionCreator, taskSettings); - action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, inferListener); + action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, request.listener()); } } else { listener.onFailure(createInvalidModelException(model)); } } - private static List translateToChunkedResults( - List inputs, - InferenceServiceResults inferenceResults - ) { - if (inferenceResults instanceof InferenceTextEmbeddingFloatResults textEmbeddingResults) { - return InferenceChunkedTextEmbeddingFloatResults.listOf(inputs, textEmbeddingResults); - } else if (inferenceResults instanceof ErrorInferenceResults error) { - return List.of(new ErrorChunkedInferenceResults(error.getException())); - } else { - throw 
createInvalidChunkedResultException(InferenceTextEmbeddingFloatResults.NAME, inferenceResults.getWriteableName()); - } - } - @Override public String name() { return NAME; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java index 3facb78864831..3c75243770f97 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java @@ -24,10 +24,6 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; -import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.azureopenai.AzureOpenAiActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; @@ -44,7 +40,6 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; @@ -246,19 +241,6 @@ protected void doChunkedInfer( } } - private static List translateToChunkedResults( - List inputs, - InferenceServiceResults inferenceResults - ) { - if (inferenceResults instanceof InferenceTextEmbeddingFloatResults textEmbeddingResults) { - return InferenceChunkedTextEmbeddingFloatResults.listOf(inputs, textEmbeddingResults); - } else if (inferenceResults instanceof ErrorInferenceResults error) { - return List.of(new ErrorChunkedInferenceResults(error.getException())); - } else { - throw createInvalidChunkedResultException(InferenceTextEmbeddingFloatResults.NAME, inferenceResults.getWriteableName()); - } - } - /** * For text embedding models get the embedding size and * update the service settings. 
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java index 00a840c8d4812..ae413fc17425c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java @@ -1048,13 +1048,18 @@ public void testChunkedInfer_CallsInfer_ConvertsFloatResponse_ForEmbeddings() th try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { - var mockResults = new InferenceTextEmbeddingFloatResults( - List.of( - new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F }), - new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.456F, 0.987F }) - ) - ); - requestSender.enqueue(mockResults); + { + var mockResults1 = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(mockResults1); + } + { + var mockResults2 = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.223F, 0.278F })) + ); + requestSender.enqueue(mockResults2); + } var model = AmazonBedrockEmbeddingsModelTests.createModel( "id", @@ -1089,7 +1094,7 @@ public void testChunkedInfer_CallsInfer_ConvertsFloatResponse_ForEmbeddings() th var floatResult = (InferenceChunkedTextEmbeddingFloatResults) results.get(1); assertThat(floatResult.chunks(), hasSize(1)); assertEquals("xyz", floatResult.chunks().get(0).matchedText()); - assertArrayEquals(new float[] { 0.456F, 0.987F }, floatResult.chunks().get(0).embedding(), 0.0f); + assertArrayEquals(new float[] { 0.223F, 0.278F }, floatResult.chunks().get(0).embedding(), 0.0f); } } } From a90ca3f10e29d2ec60eb28c0efd7e3e3e59c3678 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Tue, 9 Jul 2024 11:27:29 +0100 Subject: [PATCH 025/406] Implement xorBitCount in Elasticsearch (#110599) (#110626) Backport of: * #110599 --- .../vectors/ES815BitFlatVectorsFormat.java | 4 +- .../field/vectors/ByteBinaryDenseVector.java | 2 +- .../field/vectors/ByteKnnDenseVector.java | 2 +- .../script/field/vectors/ESVectorUtil.java | 72 +++++++++++++++++++ 4 files changed, 76 insertions(+), 4 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/script/field/vectors/ESVectorUtil.java diff --git a/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorsFormat.java index 659cc89bfe46d..de91833c99842 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorsFormat.java +++ b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorsFormat.java @@ -16,11 +16,11 @@ import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.util.VectorUtil; import 
org.apache.lucene.util.hnsw.RandomAccessVectorValues; import org.apache.lucene.util.hnsw.RandomVectorScorer; import org.apache.lucene.util.hnsw.RandomVectorScorerSupplier; import org.apache.lucene.util.quantization.RandomAccessQuantizedByteVectorValues; +import org.elasticsearch.script.field.vectors.ESVectorUtil; import java.io.IOException; @@ -100,7 +100,7 @@ public RandomVectorScorer getRandomVectorScorer( } static float hammingScore(byte[] a, byte[] b) { - return ((a.length * Byte.SIZE) - VectorUtil.xorBitCount(a, b)) / (float) (a.length * Byte.SIZE); + return ((a.length * Byte.SIZE) - ESVectorUtil.xorBitCount(a, b)) / (float) (a.length * Byte.SIZE); } static class HammingVectorScorer extends RandomVectorScorer.AbstractRandomVectorScorer { diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java index f2ff8fbccd2fb..e5c2d6a370f12 100644 --- a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java @@ -102,7 +102,7 @@ public double l1Norm(List queryVector) { @Override public int hamming(byte[] queryVector) { - return VectorUtil.xorBitCount(queryVector, vectorValue); + return ESVectorUtil.xorBitCount(queryVector, vectorValue); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java index e0ba032826aa1..0145eb3eae04b 100644 --- a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java @@ -103,7 +103,7 @@ public double l1Norm(List queryVector) { @Override public int hamming(byte[] queryVector) { - return VectorUtil.xorBitCount(queryVector, docVector); + return ESVectorUtil.xorBitCount(queryVector, docVector); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ESVectorUtil.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ESVectorUtil.java new file mode 100644 index 0000000000000..7d9542bccf357 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ESVectorUtil.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.util.BitUtil; +import org.apache.lucene.util.Constants; + +/** + * This class consists of a single utility method that provides XOR bit count computed over signed bytes. + * Remove this class when Lucene version > 9.11 is released, and replace with Lucene's VectorUtil directly. + */ +public class ESVectorUtil { + + /** + * For xorBitCount we stride over the values as either 64-bits (long) or 32-bits (int) at a time. + * On ARM Long::bitCount is not vectorized, and therefore produces less than optimal code, when + * compared to Integer::bitCount. While Long::bitCount is optimal on x64. 
+ */ + static final boolean XOR_BIT_COUNT_STRIDE_AS_INT = Constants.OS_ARCH.equals("aarch64"); + + /** + * XOR bit count computed over signed bytes. + * + * @param a bytes containing a vector + * @param b bytes containing another vector, of the same dimension + * @return the value of the XOR bit count of the two vectors + */ + public static int xorBitCount(byte[] a, byte[] b) { + if (a.length != b.length) { + throw new IllegalArgumentException("vector dimensions differ: " + a.length + "!=" + b.length); + } + if (XOR_BIT_COUNT_STRIDE_AS_INT) { + return xorBitCountInt(a, b); + } else { + return xorBitCountLong(a, b); + } + } + + /** XOR bit count striding over 4 bytes at a time. */ + static int xorBitCountInt(byte[] a, byte[] b) { + int distance = 0, i = 0; + for (final int upperBound = a.length & -Integer.BYTES; i < upperBound; i += Integer.BYTES) { + distance += Integer.bitCount((int) BitUtil.VH_NATIVE_INT.get(a, i) ^ (int) BitUtil.VH_NATIVE_INT.get(b, i)); + } + // tail: + for (; i < a.length; i++) { + distance += Integer.bitCount((a[i] ^ b[i]) & 0xFF); + } + return distance; + } + + /** XOR bit count striding over 8 bytes at a time. */ + static int xorBitCountLong(byte[] a, byte[] b) { + int distance = 0, i = 0; + for (final int upperBound = a.length & -Long.BYTES; i < upperBound; i += Long.BYTES) { + distance += Long.bitCount((long) BitUtil.VH_NATIVE_LONG.get(a, i) ^ (long) BitUtil.VH_NATIVE_LONG.get(b, i)); + } + // tail: + for (; i < a.length; i++) { + distance += Integer.bitCount((a[i] ^ b[i]) & 0xFF); + } + return distance; + } + + private ESVectorUtil() {} +} From b46807f4c1cdea30d088929667310623a3592187 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 9 Jul 2024 09:27:41 -0400 Subject: [PATCH 026/406] Fix flaky test #109978 (#110245) (#110634) CCS tests could split the vectors over any number of shards. Through empirical testing, I determined this commits values work to provide the expected order, even if they are not all part of the same shard. quantization can have weird behaviors when there are uniform values, just like this test does. 
closes #109978 (cherry picked from commit 9dbe97b2cbaa95eb7913879a5e1e0c1a0e330fc0) --- muted-tests.yml | 5 ++--- .../search.vectors/41_knn_search_half_byte_quantized.yml | 8 ++++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index d46a9355c201f..d20f46a6a5617 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -94,9 +94,8 @@ tests: - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" -- class: org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT - method: test {p0=search.vectors/41_knn_search_half_byte_quantized/Test create, merge, and search cosine} - issue: https://github.com/elastic/elasticsearch/issues/109978 +- class: "org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT" + issue: "https://github.com/elastic/elasticsearch/issues/110591" # Examples: # diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml index cb5aae482507a..5f1af2ca5c52f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml @@ -428,7 +428,7 @@ setup: index: hnsw_byte_quantized_merge_cosine id: "1" body: - embedding: [1.0, 1.0, 1.0, 1.0] + embedding: [0.5, 0.5, 0.5, 0.5, 0.5, 1.0] # Flush in order to provoke a merge later - do: @@ -439,7 +439,7 @@ setup: index: hnsw_byte_quantized_merge_cosine id: "2" body: - embedding: [1.0, 1.0, 1.0, 2.0] + embedding: [0.0, 0.0, 0.0, 1.0, 1.0, 0.5] # Flush in order to provoke a merge later - do: @@ -450,7 +450,7 @@ setup: index: hnsw_byte_quantized_merge_cosine id: "3" body: - embedding: [1.0, 1.0, 1.0, 3.0] + embedding: [0.0, 0.0, 0.0, 0.0, 0.0, 10.5] - do: indices.forcemerge: @@ -468,7 +468,7 @@ setup: query: knn: field: embedding - query_vector: [1.0, 1.0, 1.0, 1.0] + query_vector: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] num_candidates: 10 - length: { hits.hits: 3 } From 48cbdbfb2da8bed8e2c39db87840fa4614c6cea1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Tue, 9 Jul 2024 15:28:00 +0200 Subject: [PATCH 027/406] ESQL: Fix Max not working with negative or zero doubles (#110635) Partial backport from https://github.com/elastic/elasticsearch/pull/110586 Just the Max fix and an extra test for it. 
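For reference, the bug is only the aggregator's initial value: Double.MIN_VALUE is the smallest positive double (about 4.9e-324), not the lowest representable one, so a column containing only negative or zero values could never beat the seed. Plain-Java illustration, not the aggregator code:

    double badSeed = Double.MIN_VALUE;       // ~4.9e-324, still positive
    double goodSeed = -Double.MAX_VALUE;     // the true floor for doubles
    double wrong = Math.max(badSeed, -5.0);  // 4.9E-324 -> bogus positive "max"
    double right = Math.max(goodSeed, -5.0); // -5.0     -> correct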
--- docs/changelog/110586.yaml | 5 +++++ .../compute/aggregation/MaxDoubleAggregator.java | 2 +- .../aggregation/MaxDoubleAggregatorFunctionTests.java | 5 ++++- 3 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/110586.yaml diff --git a/docs/changelog/110586.yaml b/docs/changelog/110586.yaml new file mode 100644 index 0000000000000..cc2bcb85a2dac --- /dev/null +++ b/docs/changelog/110586.yaml @@ -0,0 +1,5 @@ +pr: 110586 +summary: "ESQL: Fix Max doubles bug with negatives and add tests for Max and Min" +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregator.java index ee6555c4af67d..f0804278e5002 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregator.java @@ -16,7 +16,7 @@ class MaxDoubleAggregator { public static double init() { - return Double.MIN_VALUE; + return -Double.MAX_VALUE; } public static double combine(double current, double v) { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionTests.java index 9d638fae4e822..877db42a81b50 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionTests.java @@ -22,7 +22,10 @@ public class MaxDoubleAggregatorFunctionTests extends AggregatorFunctionTestCase { @Override protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { - return new SequenceDoubleBlockSourceOperator(blockFactory, LongStream.range(0, size).mapToDouble(l -> ESTestCase.randomDouble())); + return new SequenceDoubleBlockSourceOperator( + blockFactory, + LongStream.range(0, size).mapToDouble(l -> ESTestCase.randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true)) + ); } @Override From f4fdb3077c6fae59e5765e53a3b6da1e97ffed5e Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 9 Jul 2024 08:25:52 -0700 Subject: [PATCH 028/406] [DOCS] Fix typo: though -> through (#110636) (#110644) (cherry picked from commit 1b6d44b55d68b9b2efc03b5894d10aafdf70837d) Co-authored-by: David Kyle --- docs/reference/inference/delete-inference.asciidoc | 2 +- docs/reference/inference/get-inference.asciidoc | 2 +- docs/reference/inference/inference-apis.asciidoc | 2 +- docs/reference/inference/post-inference.asciidoc | 2 +- docs/reference/inference/put-inference.asciidoc | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/reference/inference/delete-inference.asciidoc b/docs/reference/inference/delete-inference.asciidoc index 2f9d9511e6326..4df72ba672092 100644 --- a/docs/reference/inference/delete-inference.asciidoc +++ b/docs/reference/inference/delete-inference.asciidoc @@ -8,7 +8,7 @@ Deletes an {infer} endpoint. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI or -Hugging Face. 
For built-in models and models uploaded though Eland, the {infer} +Hugging Face. For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. diff --git a/docs/reference/inference/get-inference.asciidoc b/docs/reference/inference/get-inference.asciidoc index 7f4dc1c496837..c3fe841603bcc 100644 --- a/docs/reference/inference/get-inference.asciidoc +++ b/docs/reference/inference/get-inference.asciidoc @@ -8,7 +8,7 @@ Retrieves {infer} endpoint information. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI or -Hugging Face. For built-in models and models uploaded though Eland, the {infer} +Hugging Face. For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 896cb02a9e699..02a57504da1cf 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -6,7 +6,7 @@ experimental[] IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio or -Hugging Face. For built-in models and models uploaded though Eland, the {infer} +Hugging Face. For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc index 3ad23ac3300cc..52131c0b10776 100644 --- a/docs/reference/inference/post-inference.asciidoc +++ b/docs/reference/inference/post-inference.asciidoc @@ -8,7 +8,7 @@ Performs an inference task on an input text by using an {infer} endpoint. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI or -Hugging Face. For built-in models and models uploaded though Eland, the {infer} +Hugging Face. For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 101c0a24b66b7..656feb54ffe42 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -8,7 +8,7 @@ Creates an {infer} endpoint to perform an {infer} task. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI or Hugging Face. 
-For built-in models and models uploaded though Eland, the {infer} APIs offer an alternative way to use and manage trained models. +For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. From 2a6ad970487458d43584830ba7463baaef75306d Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 9 Jul 2024 18:38:16 +0300 Subject: [PATCH 029/406] Remove dep com.nimbusds:nimbus-jose-jwt from module org.elasticsearch.xcore (#110565) (#110642) The types from com.nimbusds.jwt are almost not needed in x-pack/plugin/core. They're only needed in module org.elasticsearch.security, x-pack:plugin:security project. --- x-pack/plugin/core/build.gradle | 23 +----------- .../core/src/main/java/module-info.java | 1 - .../xpack/core/security/action/Grant.java | 30 --------------- .../licenses/nimbus-jose-jwt-LICENSE.txt | 0 .../licenses/nimbus-jose-jwt-NOTICE.txt | 0 .../authc/jwt/JwtRealmSingleNodeTests.java | 1 - .../security/action/TransportGrantAction.java | 37 ++++++++++++++++++- .../security/authc/jwt/JwkSetLoader.java | 1 - .../security/authc/jwt/JwkValidateUtil.java | 1 - .../authc/jwt/JwtAuthenticationToken.java | 2 +- .../security/authc/jwt/JwtAuthenticator.java | 1 - .../xpack/security/authc/jwt/JwtRealm.java | 2 - .../authc/jwt/JwtSignatureValidator.java | 3 +- .../xpack}/security/authc/jwt/JwtUtil.java | 3 +- .../oidc/OpenIdConnectAuthenticator.java | 2 +- .../authc/jwt/JwtAuthenticatorTests.java | 1 - .../xpack/security/authc/jwt/JwtIssuer.java | 1 - .../authc/jwt/JwtRealmAuthenticateTests.java | 1 - .../authc/jwt/JwtRealmGenerateTests.java | 1 - .../security/authc/jwt/JwtRealmInspector.java | 1 - .../security/authc/jwt/JwtRealmTestCase.java | 1 - .../security/authc/jwt/JwtUtilTests.java | 1 - 22 files changed, 42 insertions(+), 72 deletions(-) rename x-pack/plugin/{core => security}/licenses/nimbus-jose-jwt-LICENSE.txt (100%) rename x-pack/plugin/{core => security}/licenses/nimbus-jose-jwt-NOTICE.txt (100%) rename x-pack/plugin/{core/src/main/java/org/elasticsearch/xpack/core => security/src/main/java/org/elasticsearch/xpack}/security/authc/jwt/JwtAuthenticationToken.java (98%) rename x-pack/plugin/{core/src/main/java/org/elasticsearch/xpack/core => security/src/main/java/org/elasticsearch/xpack}/security/authc/jwt/JwtUtil.java (99%) diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index 0c65c7e4b6d29..1ed59d6fe3581 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -51,7 +51,6 @@ dependencies { // security deps api 'com.unboundid:unboundid-ldapsdk:6.0.3' - api "com.nimbusds:nimbus-jose-jwt:9.23" implementation project(":x-pack:plugin:core:template-resources") @@ -135,27 +134,7 @@ tasks.named("thirdPartyAudit").configure { //commons-logging provided dependencies 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', - 'javax.jms.Message', - // Optional dependency of nimbus-jose-jwt for handling Ed25519 signatures and ECDH with X25519 (RFC 8037) - 'com.google.crypto.tink.subtle.Ed25519Sign', - 'com.google.crypto.tink.subtle.Ed25519Sign$KeyPair', - 'com.google.crypto.tink.subtle.Ed25519Verify', - 'com.google.crypto.tink.subtle.X25519', - 'com.google.crypto.tink.subtle.XChaCha20Poly1305', - // optional dependencies for nimbus-jose-jwt - 'org.bouncycastle.asn1.pkcs.PrivateKeyInfo', - 
'org.bouncycastle.asn1.x509.AlgorithmIdentifier', - 'org.bouncycastle.asn1.x509.SubjectPublicKeyInfo', - 'org.bouncycastle.cert.X509CertificateHolder', - 'org.bouncycastle.cert.jcajce.JcaX509CertificateHolder', - 'org.bouncycastle.crypto.InvalidCipherTextException', - 'org.bouncycastle.crypto.engines.AESEngine', - 'org.bouncycastle.crypto.modes.GCMBlockCipher', - 'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider', - 'org.bouncycastle.jce.provider.BouncyCastleProvider', - 'org.bouncycastle.openssl.PEMKeyPair', - 'org.bouncycastle.openssl.PEMParser', - 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter' + 'javax.jms.Message' ) } diff --git a/x-pack/plugin/core/src/main/java/module-info.java b/x-pack/plugin/core/src/main/java/module-info.java index 282072417875b..72436bb9d5171 100644 --- a/x-pack/plugin/core/src/main/java/module-info.java +++ b/x-pack/plugin/core/src/main/java/module-info.java @@ -22,7 +22,6 @@ requires unboundid.ldapsdk; requires org.elasticsearch.tdigest; requires org.elasticsearch.xcore.templates; - requires com.nimbusds.jose.jwt; exports org.elasticsearch.index.engine.frozen; exports org.elasticsearch.license; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java index b186ab45a7dc7..c98564251cd43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java @@ -7,19 +7,13 @@ package org.elasticsearch.xpack.core.security.action; -import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.support.BearerToken; -import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import java.io.IOException; @@ -136,30 +130,6 @@ public void setClientAuthentication(ClientAuthentication clientAuthentication) { this.clientAuthentication = clientAuthentication; } - public AuthenticationToken getAuthenticationToken() { - assert validate(null) == null : "grant is invalid"; - return switch (type) { - case PASSWORD_GRANT_TYPE -> new UsernamePasswordToken(username, password); - case ACCESS_TOKEN_GRANT_TYPE -> { - SecureString clientAuthentication = this.clientAuthentication != null ? 
this.clientAuthentication.value() : null; - AuthenticationToken token = JwtAuthenticationToken.tryParseJwt(accessToken, clientAuthentication); - if (token != null) { - yield token; - } - if (clientAuthentication != null) { - clientAuthentication.close(); - throw new ElasticsearchSecurityException( - "[client_authentication] not supported with the supplied access_token type", - RestStatus.BAD_REQUEST - ); - } - // here we effectively assume it's an ES access token (from the {@code TokenService}) - yield new BearerToken(accessToken); - } - default -> throw new ElasticsearchSecurityException("the grant type [{}] is not supported", type); - }; - } - public ActionRequestValidationException validate(ActionRequestValidationException validationException) { if (type == null) { validationException = addValidationError("[grant_type] is required", validationException); diff --git a/x-pack/plugin/core/licenses/nimbus-jose-jwt-LICENSE.txt b/x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt similarity index 100% rename from x-pack/plugin/core/licenses/nimbus-jose-jwt-LICENSE.txt rename to x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt diff --git a/x-pack/plugin/core/licenses/nimbus-jose-jwt-NOTICE.txt b/x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt similarity index 100% rename from x-pack/plugin/core/licenses/nimbus-jose-jwt-NOTICE.txt rename to x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java index 2ced54a513146..435706dce7019 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java @@ -52,7 +52,6 @@ import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Realm; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.security.LocalStateSecurity; import org.elasticsearch.xpack.security.Security; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java index 667b513555594..fffcb476abaa4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java @@ -7,24 +7,33 @@ package org.elasticsearch.xpack.security.action; +import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.rest.RestStatus; import 
org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.Grant; import org.elasticsearch.xpack.core.security.action.GrantRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.support.BearerToken; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.security.authz.AuthorizationService; +import static org.elasticsearch.xpack.core.security.action.Grant.ACCESS_TOKEN_GRANT_TYPE; +import static org.elasticsearch.xpack.core.security.action.Grant.PASSWORD_GRANT_TYPE; + public abstract class TransportGrantAction extends TransportAction< Request, Response> { @@ -50,7 +59,7 @@ public TransportGrantAction( @Override public final void doExecute(Task task, Request request, ActionListener listener) { try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - final AuthenticationToken authenticationToken = request.getGrant().getAuthenticationToken(); + final AuthenticationToken authenticationToken = getAuthenticationToken(request.getGrant()); assert authenticationToken != null : "authentication token must not be null"; final String runAsUsername = request.getGrant().getRunAsUsername(); @@ -109,4 +118,30 @@ protected abstract void doExecuteWithGrantAuthentication( Authentication authentication, ActionListener listener ); + + public static AuthenticationToken getAuthenticationToken(Grant grant) { + assert grant.validate(null) == null : "grant is invalid"; + return switch (grant.getType()) { + case PASSWORD_GRANT_TYPE -> new UsernamePasswordToken(grant.getUsername(), grant.getPassword()); + case ACCESS_TOKEN_GRANT_TYPE -> { + SecureString clientAuthentication = grant.getClientAuthentication() != null + ? 
grant.getClientAuthentication().value() + : null; + AuthenticationToken token = JwtAuthenticationToken.tryParseJwt(grant.getAccessToken(), clientAuthentication); + if (token != null) { + yield token; + } + if (clientAuthentication != null) { + clientAuthentication.close(); + throw new ElasticsearchSecurityException( + "[client_authentication] not supported with the supplied access_token type", + RestStatus.BAD_REQUEST + ); + } + // here we effectively assume it's an ES access token (from the {@code TokenService}) + yield new BearerToken(grant.getAccessToken()); + } + default -> throw new ElasticsearchSecurityException("the grant type [{}] is not supported", grant.getType()); + }; + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java index 0266fc7488e29..063cc85ea0187 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.ssl.SSLService; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java index cc07b7dfa8381..89391f91a2731 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import java.nio.charset.StandardCharsets; import java.security.PublicKey; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticationToken.java similarity index 98% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticationToken.java index ebfaae72b9df2..cfef9aed5967a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticationToken.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.core.security.authc.jwt; +package org.elasticsearch.xpack.security.authc.jwt; import com.nimbusds.jwt.JWTClaimsSet; import com.nimbusds.jwt.SignedJWT; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java index b06aba1c9d87a..2345add07ba51 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java @@ -19,7 +19,6 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java index 30a7e438e70b0..7613e7b3972af 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java @@ -31,9 +31,7 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.support.CacheIteratorHelper; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java index e183ee7d73ac2..b1ee1b77998ec 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java @@ -35,14 +35,13 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.ssl.SSLService; import java.util.Arrays; import java.util.List; import java.util.stream.Stream; -import static org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil.toStringRedactSignature; +import static org.elasticsearch.xpack.security.authc.jwt.JwtUtil.toStringRedactSignature; public interface JwtSignatureValidator extends Releasable { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java similarity index 99% rename from 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtUtil.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java index d70b76f8bc574..928ecd7fa265d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtUtil.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.core.security.authc.jwt; +package org.elasticsearch.xpack.security.authc.jwt; import com.nimbusds.jose.JWSObject; import com.nimbusds.jose.jwk.JWK; @@ -47,6 +47,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import java.io.InputStream; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java index e637bda19d886..0f34850b861b7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java @@ -91,9 +91,9 @@ import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.jwt.JwtUtil; import java.io.IOException; import java.net.URI; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java index 7a44ebae95738..6d4861212e286 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java index 3d4d9eae6acd0..789ac04c40622 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java @@ -14,7 +14,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import 
org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.user.User; import java.io.Closeable; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java index bf6c64242701b..4f7b82a16e8f1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.security.user.User; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java index 7a0e138305b83..8a5daa642002e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.user.User; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java index 40a613a0907c8..7697849179acf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.support.ClaimSetting; import java.net.URI; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java index 1bc49cb628464..ffc1fec1f5788 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java @@ -28,7 +28,6 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import 
org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings.ClientAuthenticationType; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java index 7d90dffd7517c..6fab33b4d6adf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; From 74a7070780632de7e48ff9f3116bfa04b65cf9b5 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 9 Jul 2024 14:17:03 -0700 Subject: [PATCH 030/406] Remove Windows BWC pull request pipeline (#110664) (#110670) We've already removed Windows-specific BWC jobs in our periodic pipelines. They shouldn't behave differently and are very prone to timeouts so let's just remove them from pull requests when the `test-windows` label is added. --- .../pull-request/bwc-snapshots-windows.yml | 20 ------------------- 1 file changed, 20 deletions(-) delete mode 100644 .buildkite/pipelines/pull-request/bwc-snapshots-windows.yml diff --git a/.buildkite/pipelines/pull-request/bwc-snapshots-windows.yml b/.buildkite/pipelines/pull-request/bwc-snapshots-windows.yml deleted file mode 100644 index d37bdf380f926..0000000000000 --- a/.buildkite/pipelines/pull-request/bwc-snapshots-windows.yml +++ /dev/null @@ -1,20 +0,0 @@ -config: - allow-labels: test-windows -steps: - - group: bwc-snapshots-windows - steps: - - label: "{{matrix.BWC_VERSION}} / bwc-snapshots-windows" - key: "bwc-snapshots-windows" - command: .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh - env: - GRADLE_TASK: "v{{matrix.BWC_VERSION}}#bwcTest" - timeout_in_minutes: 300 - matrix: - setup: - BWC_VERSION: $SNAPSHOT_BWC_VERSIONS - agents: - provider: gcp - image: family/elasticsearch-windows-2022 - machineType: custom-32-98304 - diskType: pd-ssd - diskSizeGb: 350 From 06cfe312894f52ec8df9968e44f63aeba1def8d7 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 9 Jul 2024 17:29:14 -0500 Subject: [PATCH 031/406] Do not run TickerScheduleTriggerEngine watches if the schedule trigger engine is paused (#110061) (#110674) --- docs/changelog/110061.yaml | 6 +++++ .../engine/TickerScheduleTriggerEngine.java | 24 +++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/110061.yaml diff --git a/docs/changelog/110061.yaml b/docs/changelog/110061.yaml new file mode 100644 index 0000000000000..1880a2a197722 --- /dev/null +++ b/docs/changelog/110061.yaml @@ -0,0 +1,6 @@ +pr: 110061 +summary: Avoiding running watch jobs in TickerScheduleTriggerEngine if it is paused +area: Watcher +type: bug +issues: + - 105933 diff --git 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java index ba07c3137340d..ced131640f0ee 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java @@ -34,6 +34,7 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.common.settings.Setting.positiveTimeSetting; @@ -50,6 +51,7 @@ public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { private final TimeValue tickInterval; private final Map schedules = new ConcurrentHashMap<>(); private final Ticker ticker; + private final AtomicBoolean isRunning = new AtomicBoolean(false); public TickerScheduleTriggerEngine(Settings settings, ScheduleRegistry scheduleRegistry, Clock clock) { super(scheduleRegistry, clock); @@ -60,7 +62,8 @@ public TickerScheduleTriggerEngine(Settings settings, ScheduleRegistry scheduleR @Override public synchronized void start(Collection jobs) { long startTime = clock.millis(); - logger.info("Watcher starting watches at {}", WatcherDateTimeUtils.dateTimeFormatter.formatMillis(startTime)); + isRunning.set(true); + logger.info("Starting watcher engine at {}", WatcherDateTimeUtils.dateTimeFormatter.formatMillis(startTime)); Map startingSchedules = Maps.newMapWithExpectedSize(jobs.size()); for (Watch job : jobs) { if (job.trigger() instanceof ScheduleTrigger trigger) { @@ -81,17 +84,22 @@ public synchronized void start(Collection jobs) { @Override public void stop() { + logger.info("Stopping watcher engine"); + isRunning.set(false); schedules.clear(); ticker.close(); } @Override - public synchronized void pauseExecution() { + public void pauseExecution() { + logger.info("Pausing watcher engine"); + isRunning.set(false); schedules.clear(); } @Override public void add(Watch watch) { + logger.trace("Adding watch [{}] to engine (engine is running: {})", watch.id(), isRunning.get()); assert watch.trigger() instanceof ScheduleTrigger; ScheduleTrigger trigger = (ScheduleTrigger) watch.trigger(); ActiveSchedule currentSchedule = schedules.get(watch.id()); @@ -106,13 +114,25 @@ public void add(Watch watch) { @Override public boolean remove(String jobId) { + logger.debug("Removing watch [{}] from engine (engine is running: {})", jobId, isRunning.get()); return schedules.remove(jobId) != null; } void checkJobs() { + if (isRunning.get() == false) { + logger.debug( + "Watcher not running because the engine is paused. 
Currently scheduled watches being skipped: {}", + schedules.size() + ); + return; + } long triggeredTime = clock.millis(); List events = new ArrayList<>(); for (ActiveSchedule schedule : schedules.values()) { + if (isRunning.get() == false) { + logger.debug("Watcher paused while running [{}]", schedule.name); + break; + } long scheduledTime = schedule.check(triggeredTime); if (scheduledTime > 0) { ZonedDateTime triggeredDateTime = utcDateTimeAtEpochMillis(triggeredTime); From 18aab96771241a1291bc1d2161a0405ca10fac43 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 9 Jul 2024 17:52:02 -0500 Subject: [PATCH 032/406] Adding a unit test for GeoIpDownloader.cleanDatabases (#110650) (#110675) Co-authored-by: Joe Gallo --- .../ingest/geoip/GeoIpDownloaderTests.java | 95 ++++++++++++++++++- 1 file changed, 93 insertions(+), 2 deletions(-) diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 4834c581e9386..4d5070d96683e 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -30,11 +30,17 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.ingest.geoip.stats.GeoIpDownloaderStats; import org.elasticsearch.node.Node; +import org.elasticsearch.persistent.PersistentTaskResponse; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; @@ -49,6 +55,9 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.temporal.ChronoUnit; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -63,6 +72,8 @@ import static org.elasticsearch.ingest.geoip.GeoIpDownloader.MAX_CHUNK_SIZE; import static org.elasticsearch.tasks.TaskId.EMPTY_TASK_ID; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; @@ -76,8 +87,9 @@ public class GeoIpDownloaderTests extends ESTestCase { private GeoIpDownloader geoIpDownloader; @Before - public void setup() { + public void setup() throws IOException { httpClient = mock(HttpClient.class); + when(httpClient.getBytes(anyString())).thenReturn("[]".getBytes(StandardCharsets.UTF_8)); clusterService = mock(ClusterService.class); threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); 
when(clusterService.getClusterSettings()).thenReturn( @@ -109,7 +121,13 @@ public void setup() { () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), () -> true - ); + ) { + { + GeoIpTaskParams geoIpTaskParams = mock(GeoIpTaskParams.class); + when(geoIpTaskParams.getWriteableName()).thenReturn(GeoIpDownloader.GEOIP_DOWNLOADER); + init(new PersistentTasksService(clusterService, threadPool, client), null, null, 0); + } + }; } @After @@ -541,6 +559,79 @@ public void testUpdateDatabasesIndexNotReady() { verifyNoInteractions(httpClient); } + public void testThatRunDownloaderDeletesExpiredDatabases() { + /* + * This test puts some expired databases and some non-expired ones into the GeoIpTaskState, and then calls runDownloader(), making + * sure that the expired databases have been deleted. + */ + AtomicInteger updatePersistentTaskStateCount = new AtomicInteger(0); + AtomicInteger deleteCount = new AtomicInteger(0); + int expiredDatabasesCount = randomIntBetween(1, 100); + int unexpiredDatabasesCount = randomIntBetween(0, 100); + Map databases = new HashMap<>(); + for (int i = 0; i < expiredDatabasesCount; i++) { + databases.put("expiredDatabase" + i, newGeoIpTaskStateMetadata(true)); + } + for (int i = 0; i < unexpiredDatabasesCount; i++) { + databases.put("unexpiredDatabase" + i, newGeoIpTaskStateMetadata(false)); + } + GeoIpTaskState geoIpTaskState = new GeoIpTaskState(databases); + geoIpDownloader.setState(geoIpTaskState); + client.addHandler( + UpdatePersistentTaskStatusAction.INSTANCE, + (UpdatePersistentTaskStatusAction.Request request, ActionListener taskResponseListener) -> { + + PersistentTasksCustomMetadata.Assignment assignment = mock(PersistentTasksCustomMetadata.Assignment.class); + PersistentTasksCustomMetadata.PersistentTask persistentTask = new PersistentTasksCustomMetadata.PersistentTask<>( + GeoIpDownloader.GEOIP_DOWNLOADER, + GeoIpDownloader.GEOIP_DOWNLOADER, + new GeoIpTaskParams(), + request.getAllocationId(), + assignment + ); + taskResponseListener.onResponse(new PersistentTaskResponse(new PersistentTask<>(persistentTask, request.getState()))); + updatePersistentTaskStateCount.incrementAndGet(); + } + ); + client.addHandler( + DeleteByQueryAction.INSTANCE, + (DeleteByQueryRequest request, ActionListener flushResponseActionListener) -> { + deleteCount.incrementAndGet(); + } + ); + geoIpDownloader.runDownloader(); + assertThat(geoIpDownloader.getStatus().getExpiredDatabases(), equalTo(expiredDatabasesCount)); + for (int i = 0; i < expiredDatabasesCount; i++) { + // This currently fails because we subtract one millisecond from the lastChecked time + // assertThat(geoIpDownloader.state.getDatabases().get("expiredDatabase" + i).lastCheck(), equalTo(-1L)); + } + for (int i = 0; i < unexpiredDatabasesCount; i++) { + assertThat( + geoIpDownloader.state.getDatabases().get("unexpiredDatabase" + i).lastCheck(), + greaterThanOrEqualTo(Instant.now().minus(30, ChronoUnit.DAYS).toEpochMilli()) + ); + } + assertThat(deleteCount.get(), equalTo(expiredDatabasesCount)); + assertThat(updatePersistentTaskStateCount.get(), equalTo(expiredDatabasesCount)); + geoIpDownloader.runDownloader(); + /* + * The following two lines assert current behavior that might not be desirable -- we continue to delete expired databases every + * time that runDownloader runs. This seems unnecessary. 
+ */ + assertThat(deleteCount.get(), equalTo(expiredDatabasesCount * 2)); + assertThat(updatePersistentTaskStateCount.get(), equalTo(expiredDatabasesCount * 2)); + } + + private GeoIpTaskState.Metadata newGeoIpTaskStateMetadata(boolean expired) { + Instant lastChecked; + if (expired) { + lastChecked = Instant.now().minus(randomIntBetween(31, 100), ChronoUnit.DAYS); + } else { + lastChecked = Instant.now().minus(randomIntBetween(0, 29), ChronoUnit.DAYS); + } + return new GeoIpTaskState.Metadata(0, 0, 0, randomAlphaOfLength(20), lastChecked.toEpochMilli()); + } + private static class MockClient extends NoOpClient { private final Map, BiConsumer>> handlers = new HashMap<>(); From 6833aa0666d16a9ce52cfbf2350d7119075f5078 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 9 Jul 2024 18:53:51 -0500 Subject: [PATCH 033/406] Removing the use of Stream::peek from GeoIpDownloader::cleanDatabases (#110666) (#110679) --- docs/changelog/110666.yaml | 5 ++++ .../ingest/geoip/GeoIpDownloader.java | 23 ++++++++----------- .../ingest/geoip/GeoIpDownloaderTests.java | 3 +-- 3 files changed, 16 insertions(+), 15 deletions(-) create mode 100644 docs/changelog/110666.yaml diff --git a/docs/changelog/110666.yaml b/docs/changelog/110666.yaml new file mode 100644 index 0000000000000..d96f8e2024c81 --- /dev/null +++ b/docs/changelog/110666.yaml @@ -0,0 +1,5 @@ +pr: 110666 +summary: Removing the use of Stream::peek from `GeoIpDownloader::cleanDatabases` +area: Ingest Node +type: bug +issues: [] diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 5239e96856b7f..13394a2a0c7cc 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -318,22 +318,19 @@ public void requestReschedule() { } private void cleanDatabases() { - long expiredDatabases = state.getDatabases() + List> expiredDatabases = state.getDatabases() .entrySet() .stream() .filter(e -> e.getValue().isValid(clusterService.state().metadata().settings()) == false) - .peek(e -> { - String name = e.getKey(); - Metadata meta = e.getValue(); - deleteOldChunks(name, meta.lastChunk() + 1); - state = state.put( - name, - new Metadata(meta.lastUpdate(), meta.firstChunk(), meta.lastChunk(), meta.md5(), meta.lastCheck() - 1) - ); - updateTaskState(); - }) - .count(); - stats = stats.expiredDatabases((int) expiredDatabases); + .toList(); + expiredDatabases.forEach(e -> { + String name = e.getKey(); + Metadata meta = e.getValue(); + deleteOldChunks(name, meta.lastChunk() + 1); + state = state.put(name, new Metadata(meta.lastUpdate(), meta.firstChunk(), meta.lastChunk(), meta.md5(), meta.lastCheck() - 1)); + updateTaskState(); + }); + stats = stats.expiredDatabases(expiredDatabases.size()); } @Override diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 4d5070d96683e..6a83fe69473f7 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -580,7 +580,6 @@ public void testThatRunDownloaderDeletesExpiredDatabases() { client.addHandler( UpdatePersistentTaskStatusAction.INSTANCE, 
(UpdatePersistentTaskStatusAction.Request request, ActionListener taskResponseListener) -> { - PersistentTasksCustomMetadata.Assignment assignment = mock(PersistentTasksCustomMetadata.Assignment.class); PersistentTasksCustomMetadata.PersistentTask persistentTask = new PersistentTasksCustomMetadata.PersistentTask<>( GeoIpDownloader.GEOIP_DOWNLOADER, @@ -589,8 +588,8 @@ public void testThatRunDownloaderDeletesExpiredDatabases() { request.getAllocationId(), assignment ); - taskResponseListener.onResponse(new PersistentTaskResponse(new PersistentTask<>(persistentTask, request.getState()))); updatePersistentTaskStateCount.incrementAndGet(); + taskResponseListener.onResponse(new PersistentTaskResponse(new PersistentTask<>(persistentTask, request.getState()))); } ); client.addHandler( From 77392d64e7643410d384aef085001d1935be9327 Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Wed, 10 Jul 2024 10:15:06 +0200 Subject: [PATCH 034/406] Remove `default_field: message` from metrics index templates (#110651) (#110683) This is a follow-up from https://github.com/elastic/elasticsearch/pull/102456 --- docs/changelog/110651.yaml | 5 +++++ .../src/main/resources/metrics@settings.json | 3 --- .../src/main/resources/metrics@tsdb-settings.json | 3 --- .../org/elasticsearch/xpack/stack/StackTemplateRegistry.java | 2 +- 4 files changed, 6 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/110651.yaml diff --git a/docs/changelog/110651.yaml b/docs/changelog/110651.yaml new file mode 100644 index 0000000000000..c25c63ee0284a --- /dev/null +++ b/docs/changelog/110651.yaml @@ -0,0 +1,5 @@ +pr: 110651 +summary: "Remove `default_field: message` from metrics index templates" +area: Data streams +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json index 4f3fac1aed5ae..9960bd2e7fdac 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json @@ -10,9 +10,6 @@ "total_fields": { "ignore_dynamic_beyond_limit": true } - }, - "query": { - "default_field": ["message"] } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json index b0db168e8189d..cb0e2cbffb50b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json @@ -9,9 +9,6 @@ "total_fields": { "ignore_dynamic_beyond_limit": true } - }, - "query": { - "default_field": ["message"] } } } diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java index aa1e8858163a5..648146ccdcc61 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java @@ -47,7 +47,7 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { // The stack template registry version. This number must be incremented when we make changes // to built-in templates. 
- public static final int REGISTRY_VERSION = 11; + public static final int REGISTRY_VERSION = 12; public static final String TEMPLATE_VERSION_VARIABLE = "xpack.stack.template.version"; public static final Setting STACK_TEMPLATES_ENABLED = Setting.boolSetting( From b8fbe8eed7c6761e308eb63d83a0b47ea667aa9a Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Wed, 10 Jul 2024 12:01:24 +0200 Subject: [PATCH 035/406] Remove version barrier for synthetic version based features in tests (#110656) (#110684) --- .../test/rest/ESRestTestFeatureService.java | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java index 78a4126ec09db..92d72afbf9d52 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java @@ -86,19 +86,6 @@ public boolean clusterHasFeature(String featureId) { Matcher matcher = VERSION_FEATURE_PATTERN.matcher(featureId); if (matcher.matches()) { Version extractedVersion = Version.fromString(matcher.group(1)); - if (Version.V_8_15_0.before(extractedVersion)) { - // As of version 8.14.0 REST tests have been migrated to use features only. - // For migration purposes we provide a synthetic version feature gte_vX.Y.Z for any version at or before 8.15.0 - // allowing for some transition period. - throw new IllegalArgumentException( - Strings.format( - "Synthetic version features are only available before [%s] for migration purposes! " - + "Please add a cluster feature to an appropriate FeatureSpecification; test-only historical-features " - + "can be supplied via ESRestTestCase#additionalTestOnlyHistoricalFeatures()", - Version.V_8_15_0 - ) - ); - } return version.onOrAfter(extractedVersion); } From a3b06b088410fe8d5d0b18df0de8cd0218c7abbd Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Wed, 10 Jul 2024 14:26:11 +0300 Subject: [PATCH 036/406] Backport: Updating ESSingleNodeTestCase to ensure that all free_context actions have been consumed before tearDown (#110685) --- .../org/elasticsearch/test/ESSingleNodeTestCase.java | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 8526acc851c72..7fdc5765a90e8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -69,6 +69,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.elasticsearch.action.search.SearchTransportService.FREE_CONTEXT_ACTION_NAME; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; import static org.elasticsearch.test.NodeRoles.dataNode; @@ -130,6 +131,8 @@ public void tearDown() throws Exception { logger.trace("[{}#{}]: cleaning up after test", getTestClass().getSimpleName(), getTestName()); awaitIndexShardCloseAsyncTasks(); ensureNoInitializingShards(); + ensureAllFreeContextActionsAreConsumed(); + SearchService searchService = getInstanceFromNode(SearchService.class); assertThat(searchService.getActiveContexts(), equalTo(0)); 
assertThat(searchService.getOpenScrollContexts(), equalTo(0)); @@ -455,6 +458,14 @@ protected void ensureNoInitializingShards() { assertFalse("timed out waiting for shards to initialize", actionGet.isTimedOut()); } + /** + * waits until all free_context actions have been handled by the generic thread pool + */ + protected void ensureAllFreeContextActionsAreConsumed() throws Exception { + logger.info("--> waiting for all free_context tasks to complete within a reasonable time"); + safeGet(clusterAdmin().prepareListTasks().setActions(FREE_CONTEXT_ACTION_NAME + "*").setWaitForCompletion(true).execute()); + } + /** * Whether we'd like to enable inter-segment search concurrency and increase the likelihood of leveraging it, by creating multiple * slices with a low amount of documents in them, which would not be allowed in production. From 58e7a30d5024e9305749efc92c5ebb34d85b9129 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 10 Jul 2024 07:59:46 -0400 Subject: [PATCH 037/406] Fix search template examples by removing params on put (#110660) (#110703) --- docs/reference/search/multi-search-template-api.asciidoc | 3 --- docs/reference/search/render-search-template-api.asciidoc | 3 --- docs/reference/search/search-template-api.asciidoc | 3 --- .../reference/search/search-your-data/search-template.asciidoc | 3 --- 4 files changed, 12 deletions(-) diff --git a/docs/reference/search/multi-search-template-api.asciidoc b/docs/reference/search/multi-search-template-api.asciidoc index c8eea52a6fd9b..b1c9518b1f2bc 100644 --- a/docs/reference/search/multi-search-template-api.asciidoc +++ b/docs/reference/search/multi-search-template-api.asciidoc @@ -22,9 +22,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/docs/reference/search/render-search-template-api.asciidoc b/docs/reference/search/render-search-template-api.asciidoc index 1f259dddf6879..0c782f26068e6 100644 --- a/docs/reference/search/render-search-template-api.asciidoc +++ b/docs/reference/search/render-search-template-api.asciidoc @@ -22,9 +22,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/docs/reference/search/search-template-api.asciidoc b/docs/reference/search/search-template-api.asciidoc index 038396e558607..c60b5281c05e5 100644 --- a/docs/reference/search/search-template-api.asciidoc +++ b/docs/reference/search/search-template-api.asciidoc @@ -21,9 +21,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/docs/reference/search/search-your-data/search-template.asciidoc b/docs/reference/search/search-your-data/search-template.asciidoc index 7a7f09f4a37a7..489a03c0a6a2a 100644 --- a/docs/reference/search/search-your-data/search-template.asciidoc +++ b/docs/reference/search/search-your-data/search-template.asciidoc @@ -42,9 +42,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } From 5fb62fead05526332a8907ba302e2af61d33e556 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Wed, 10 Jul 2024 16:02:55 +0300 Subject: [PATCH 038/406] unmuting tests (#110708) --- muted-tests.yml | 15 --------------- .../HistogramPercentileAggregationTests.java | 1 - .../textsimilarity/TextSimilarityRankTests.java | 1 - 3 files changed, 17 deletions(-) diff --git 
a/muted-tests.yml b/muted-tests.yml index d20f46a6a5617..3653617bd2ce6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -44,12 +44,6 @@ tests: - class: "org.elasticsearch.xpack.test.rest.XPackRestIT" issue: "https://github.com/elastic/elasticsearch/issues/109687" method: "test {p0=sql/translate/Translate SQL}" -- class: org.elasticsearch.action.search.SearchProgressActionListenerIT - method: testSearchProgressWithHits - issue: https://github.com/elastic/elasticsearch/issues/109830 -- class: "org.elasticsearch.xpack.security.ScrollHelperIntegTests" - issue: "https://github.com/elastic/elasticsearch/issues/109905" - method: "testFetchAllEntities" - class: "org.elasticsearch.xpack.esql.action.AsyncEsqlQueryActionIT" issue: "https://github.com/elastic/elasticsearch/issues/109944" method: "testBasicAsyncExecution" @@ -67,18 +61,12 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/110211 - class: "org.elasticsearch.rest.RestControllerIT" issue: "https://github.com/elastic/elasticsearch/issues/110225" -- class: "org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests" - issue: "https://github.com/elastic/elasticsearch/issues/110227" - method: "testGetPrivilegesUsesCache" - class: org.elasticsearch.upgrades.SecurityIndexRolesMetadataMigrationIT method: testMetadataMigratedAfterUpgrade issue: https://github.com/elastic/elasticsearch/issues/110232 - class: org.elasticsearch.compute.lucene.ValueSourceReaderTypeConversionTests method: testLoadAll issue: https://github.com/elastic/elasticsearch/issues/110244 -- class: org.elasticsearch.action.search.SearchProgressActionListenerIT - method: testSearchProgressWithQuery - issue: https://github.com/elastic/elasticsearch/issues/109867 - class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT method: testMinVersionAsNewVersion issue: https://github.com/elastic/elasticsearch/issues/95384 @@ -88,9 +76,6 @@ tests: - class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT method: testMinVersionAsOldVersion issue: https://github.com/elastic/elasticsearch/issues/109454 -- class: org.elasticsearch.search.aggregations.bucket.terms.RareTermsIT - method: testSingleValuedString - issue: https://github.com/elastic/elasticsearch/issues/110388 - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java index f60466bcf43cc..7c6f85104b5f8 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java @@ -241,7 +241,6 @@ public void testTDigestHistogram() throws Exception { ); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/110406") public void testBoxplotHistogram() throws Exception { int compression = TestUtil.nextInt(random(), 200, 300); setupTDigestHistogram(compression); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java index 8cb9305edd057..7fbfe70dbcfe7 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java @@ -151,7 +151,6 @@ public void testRerankInferenceFailure() { ); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/110398") public void testRerankInferenceResultMismatch() { ElasticsearchAssertions.assertFailures( // Execute search with text similarity reranking From 548d7d06d161057846208cc7ac97e131706aa21d Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 10 Jul 2024 14:21:58 -0700 Subject: [PATCH 039/406] Ensure correct runtime Java is used when it matches build Java (#110730) (#110731) Ensures that the value provided by the `runtime.java` system property, or `RUNTIME_JAVA_HOME` environment variable is respected, even when it is the same as the current `JAVA_HOME`. The previous logic had `isRuntimeJavaHomeSet` return `false` when the "requested" Java was the same as the current one. This isn't strictly correct, as the behavior when Java home is unset is to use the bundled JDK. The result was that passing `-Druntime.java=17` when the Gradle daemon was also using Java 17 was to execute tests with the bundled JDK, which could be something entirely different. --- .../internal/info/GlobalBuildInfoPlugin.java | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index 42834928bafed..b8ebb454ddb16 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -51,6 +51,7 @@ import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.Optional; import java.util.Random; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -97,24 +98,25 @@ public void apply(Project project) { JavaVersion minimumCompilerVersion = JavaVersion.toVersion(getResourceContents("/minimumCompilerVersion")); JavaVersion minimumRuntimeVersion = JavaVersion.toVersion(getResourceContents("/minimumRuntimeVersion")); - File runtimeJavaHome = findRuntimeJavaHome(); - boolean isRuntimeJavaHomeSet = Jvm.current().getJavaHome().equals(runtimeJavaHome) == false; + Optional selectedRuntimeJavaHome = findRuntimeJavaHome(); + File actualRuntimeJavaHome = selectedRuntimeJavaHome.orElse(Jvm.current().getJavaHome()); + boolean isRuntimeJavaHomeSet = selectedRuntimeJavaHome.isPresent(); GitInfo gitInfo = GitInfo.gitInfo(project.getRootDir()); BuildParams.init(params -> { params.reset(); - params.setRuntimeJavaHome(runtimeJavaHome); + params.setRuntimeJavaHome(actualRuntimeJavaHome); params.setJavaToolChainSpec(resolveToolchainSpecFromEnv()); params.setRuntimeJavaVersion( determineJavaVersion( "runtime java.home", - runtimeJavaHome, + actualRuntimeJavaHome, isRuntimeJavaHomeSet ? 
minimumRuntimeVersion : Jvm.current().getJavaVersion() ) ); params.setIsRuntimeJavaHomeSet(isRuntimeJavaHomeSet); - JvmInstallationMetadata runtimeJdkMetaData = metadataDetector.getMetadata(getJavaInstallation(runtimeJavaHome)); + JvmInstallationMetadata runtimeJdkMetaData = metadataDetector.getMetadata(getJavaInstallation(actualRuntimeJavaHome)); params.setRuntimeJavaDetails(formatJavaVendorDetails(runtimeJdkMetaData)); params.setJavaVersions(getAvailableJavaVersions()); params.setMinimumCompilerVersion(minimumCompilerVersion); @@ -298,19 +300,19 @@ private static void assertMinimumCompilerVersion(JavaVersion minimumCompilerVers } } - private File findRuntimeJavaHome() { + private Optional findRuntimeJavaHome() { String runtimeJavaProperty = System.getProperty("runtime.java"); if (runtimeJavaProperty != null) { - return resolveJavaHomeFromToolChainService(runtimeJavaProperty); + return Optional.of(resolveJavaHomeFromToolChainService(runtimeJavaProperty)); } String env = System.getenv("RUNTIME_JAVA_HOME"); if (env != null) { - return new File(env); + return Optional.of(new File(env)); } // fall back to tool chain if set. env = System.getenv("JAVA_TOOLCHAIN_HOME"); - return env == null ? Jvm.current().getJavaHome() : new File(env); + return env == null ? Optional.empty() : Optional.of(new File(env)); } @NotNull From eaa85b2702dc496b87e0b14a60c1409aa094c2fb Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 8 Jul 2024 17:01:47 -0500 Subject: [PATCH 040/406] Updating CloseIndexRequestTests to account for master term (#110611) --- .../action/admin/indices/close/CloseIndexRequestTests.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java index b3caf93fbcddf..24c0f9d97800b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java @@ -49,6 +49,9 @@ public void testBwcSerialization() throws Exception { in.setTransportVersion(out.getTransportVersion()); assertEquals(request.getParentTask(), TaskId.readFromStream(in)); assertEquals(request.masterNodeTimeout(), in.readTimeValue()); + if (in.getTransportVersion().onOrAfter(TransportVersions.VERSIONED_MASTER_NODE_REQUESTS)) { + assertEquals(request.masterTerm(), in.readVLong()); + } assertEquals(request.ackTimeout(), in.readTimeValue()); assertArrayEquals(request.indices(), in.readStringArray()); final IndicesOptions indicesOptions = IndicesOptions.readIndicesOptions(in); @@ -75,6 +78,9 @@ public void testBwcSerialization() throws Exception { out.setTransportVersion(version); sample.getParentTask().writeTo(out); out.writeTimeValue(sample.masterNodeTimeout()); + if (out.getTransportVersion().onOrAfter(TransportVersions.VERSIONED_MASTER_NODE_REQUESTS)) { + out.writeVLong(sample.masterTerm()); + } out.writeTimeValue(sample.ackTimeout()); out.writeStringArray(sample.indices()); sample.indicesOptions().writeIndicesOptions(out); From e2ee114ac00caa2ce0e50be88f0ea67d85e3bd89 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Thu, 11 Jul 2024 14:48:23 +0200 Subject: [PATCH 041/406] Aggs: Scripted metric allow list docs (#109635) (#110750) * Document new settings * Mention agg allow list in scripting security doc --- .../modules/indices/search-settings.asciidoc | 33 +++++++++ 
docs/reference/scripting/security.asciidoc | 67 ++++++++++++++++--- 2 files changed, 92 insertions(+), 8 deletions(-) diff --git a/docs/reference/modules/indices/search-settings.asciidoc b/docs/reference/modules/indices/search-settings.asciidoc index e43ec076578d4..003974815c4bd 100644 --- a/docs/reference/modules/indices/search-settings.asciidoc +++ b/docs/reference/modules/indices/search-settings.asciidoc @@ -33,6 +33,39 @@ a single response. Defaults to 65,536. + Requests that attempt to return more than this limit will return an error. +[[search-settings-only-allowed-scripts]] +`search.aggs.only_allowed_metric_scripts`:: +(<>, boolean) +Configures whether only explicitly allowed scripts can be used in +<>. +Defaults to `false`. ++ +Requests using scripts not contained in either +<> +or +<> +will return an error. + +[[search-settings-allowed-inline-scripts]] +`search.aggs.allowed_inline_metric_scripts`:: +(<>, list of strings) +List of inline scripts that can be used in scripted metrics aggregations when +<> +is set to `true`. +Defaults to an empty list. ++ +Requests using other inline scripts will return an error. + +[[search-settings-allowed-stored-scripts]] +`search.aggs.allowed_stored_metric_scripts`:: +(<>, list of strings) +List of ids of stored scripts that can be used in scripted metrics aggregations when +<> +is set to `true`. +Defaults to an empty list. ++ +Requests using other stored scripts will return an error. + [[indices-query-bool-max-nested-depth]] `indices.query.bool.max_nested_depth`:: (<>, integer) Maximum nested depth of queries. Defaults to `30`. diff --git a/docs/reference/scripting/security.asciidoc b/docs/reference/scripting/security.asciidoc index 0f322d08726b9..249a705e92817 100644 --- a/docs/reference/scripting/security.asciidoc +++ b/docs/reference/scripting/security.asciidoc @@ -9,8 +9,8 @@ security in a defense in depth strategy for scripting. The second layer of security is the https://www.oracle.com/java/technologies/javase/seccodeguide.html[Java Security Manager]. As part of its startup sequence, {es} enables the Java Security Manager to limit the actions that -portions of the code can take. <> uses -the Java Security Manager as an additional layer of defense to prevent scripts +portions of the code can take. <> uses +the Java Security Manager as an additional layer of defense to prevent scripts from doing things like writing files and listening to sockets. {es} uses @@ -18,22 +18,28 @@ from doing things like writing files and listening to sockets. https://www.chromium.org/developers/design-documents/sandbox/osx-sandboxing-design[Seatbelt] in macOS, and https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147[ActiveProcessLimit] -on Windows as additional security layers to prevent {es} from forking or +on Windows as additional security layers to prevent {es} from forking or running other processes. +Finally, scripts used in +<> +can be restricted to a defined list of scripts, or forbidden altogether. +This can prevent users from running particularly slow or resource intensive aggregation +queries. + You can modify the following script settings to restrict the type of scripts -that are allowed to run, and control the available +that are allowed to run, and control the available {painless}/painless-contexts.html[contexts] that scripts can run in. To -implement additional layers in your defense in depth strategy, follow the +implement additional layers in your defense in depth strategy, follow the <>. 
[[allowed-script-types-setting]] [discrete] === Allowed script types setting -{es} supports two script types: `inline` and `stored`. By default, {es} is -configured to run both types of scripts. To limit what type of scripts are run, -set `script.allowed_types` to `inline` or `stored`. To prevent any scripts from +{es} supports two script types: `inline` and `stored`. By default, {es} is +configured to run both types of scripts. To limit what type of scripts are run, +set `script.allowed_types` to `inline` or `stored`. To prevent any scripts from running, set `script.allowed_types` to `none`. IMPORTANT: If you use {kib}, set `script.allowed_types` to both or just `inline`. @@ -61,3 +67,48 @@ For example, to allow scripts to run only in `scoring` and `update` contexts: ---- script.allowed_contexts: score, update ---- + +[[allowed-script-in-aggs-settings]] +[discrete] +=== Allowed scripts in scripted metrics aggregations + +By default, all scripts are permitted in +<>. +To restrict the set of allowed scripts, set +<> +to `true` and provide the allowed scripts using +<> +and/or +<>. + +To disallow certain script types, omit the corresponding script list +(`search.aggs.allowed_inline_metric_scripts` or +`search.aggs.allowed_stored_metric_scripts`) or set it to an empty array. +When both script lists are not empty, the given stored scripts and the given inline scripts +will be allowed. + +The following example permits only 4 specific stored scripts to be used, and no inline scripts: + +[source,yaml] +---- +search.aggs.only_allowed_metric_scripts: true +search.aggs.allowed_inline_metric_scripts: [] +search.aggs.allowed_stored_metric_scripts: + - script_id_1 + - script_id_2 + - script_id_3 + - script_id_4 +---- + +Conversely, the next example allows specific inline scripts but no stored scripts: + +[source,yaml] +---- +search.aggs.only_allowed_metric_scripts: true +search.aggs.allowed_inline_metric_scripts: + - 'state.transactions = []' + - 'state.transactions.add(doc.some_field.value)' + - 'long sum = 0; for (t in state.transactions) { sum += t } return sum' + - 'long sum = 0; for (a in states) { sum += a } return sum' +search.aggs.allowed_stored_metric_scripts: [] +---- From dcbc230a77453694db4c04079f21bf5f5f8a51af Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Thu, 11 Jul 2024 15:08:57 +0200 Subject: [PATCH 042/406] Group vector queries into new section (#110722) (#110754) --- docs/reference/query-dsl.asciidoc | 6 +-- .../query-dsl/sparse-vector-query.asciidoc | 12 +++--- .../query-dsl/special-queries.asciidoc | 13 ------- .../query-dsl/text-expansion-query.asciidoc | 12 +++--- .../query-dsl/vector-queries.asciidoc | 37 +++++++++++++++++++ 5 files changed, 51 insertions(+), 29 deletions(-) create mode 100644 docs/reference/query-dsl/vector-queries.asciidoc diff --git a/docs/reference/query-dsl.asciidoc b/docs/reference/query-dsl.asciidoc index 4d5504e5fe7ae..2f8f07f21f648 100644 --- a/docs/reference/query-dsl.asciidoc +++ b/docs/reference/query-dsl.asciidoc @@ -72,14 +72,12 @@ include::query-dsl/match-all-query.asciidoc[] include::query-dsl/span-queries.asciidoc[] +include::query-dsl/vector-queries.asciidoc[] + include::query-dsl/special-queries.asciidoc[] include::query-dsl/term-level-queries.asciidoc[] -include::query-dsl/text-expansion-query.asciidoc[] - -include::query-dsl/sparse-vector-query.asciidoc[] - include::query-dsl/minimum-should-match.asciidoc[] include::query-dsl/multi-term-rewrite.asciidoc[] diff --git 
a/docs/reference/query-dsl/sparse-vector-query.asciidoc b/docs/reference/query-dsl/sparse-vector-query.asciidoc index 80616ff174e36..08dd7ab7f4470 100644 --- a/docs/reference/query-dsl/sparse-vector-query.asciidoc +++ b/docs/reference/query-dsl/sparse-vector-query.asciidoc @@ -1,5 +1,5 @@ [[query-dsl-sparse-vector-query]] -== Sparse vector query +=== Sparse vector query ++++ Sparse vector @@ -19,7 +19,7 @@ For example, a stored vector `{"feature_0": 0.12, "feature_1": 1.2, "feature_2": [discrete] [[sparse-vector-query-ex-request]] -=== Example request using an {nlp} model +==== Example request using an {nlp} model [source,console] ---- @@ -37,7 +37,7 @@ GET _search // TEST[skip: Requires inference] [discrete] -=== Example request using precomputed vectors +==== Example request using precomputed vectors [source,console] ---- @@ -55,7 +55,7 @@ GET _search [discrete] [[sparse-vector-field-params]] -=== Top level parameters for `sparse_vector` +==== Top level parameters for `sparse_vector` `field`:: (Required, string) The name of the field that contains the token-weight pairs to be searched against. @@ -120,7 +120,7 @@ NOTE: The default values for `tokens_freq_ratio_threshold` and `tokens_weight_th [discrete] [[sparse-vector-query-example]] -=== Example ELSER query +==== Example ELSER query The following is an example of the `sparse_vector` query that references the ELSER model to perform semantic search. For a more detailed description of how to perform semantic search by using ELSER and the `sparse_vector` query, refer to <>. @@ -241,7 +241,7 @@ GET my-index/_search [discrete] [[sparse-vector-query-with-pruning-config-and-rescore-example]] -=== Example ELSER query with pruning configuration and rescore +==== Example ELSER query with pruning configuration and rescore The following is an extension to the above example that adds a preview:[] pruning configuration to the `sparse_vector` query. The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index 90cd9a696a6d9..a6d35d4f9b707 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -17,10 +17,6 @@ or collection of documents. This query finds queries that are stored as documents that match with the specified document. -<>:: -A query that finds the _k_ nearest vectors to a query -vector, as measured by a similarity metric. - <>:: A query that computes scores based on the values of numeric features and is able to efficiently skip non-competitive hits. @@ -32,9 +28,6 @@ This query allows a script to act as a filter. Also see the <>:: A query that allows to modify the score of a sub-query with a script. -<>:: -A query that allows you to perform semantic search. - <>:: A query that accepts other queries as json or yaml string. 
@@ -50,20 +43,14 @@ include::mlt-query.asciidoc[] include::percolate-query.asciidoc[] -include::knn-query.asciidoc[] - include::rank-feature-query.asciidoc[] include::script-query.asciidoc[] include::script-score-query.asciidoc[] -include::semantic-query.asciidoc[] - include::wrapper-query.asciidoc[] include::pinned-query.asciidoc[] include::rule-query.asciidoc[] - -include::weighted-tokens-query.asciidoc[] diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index 1c51429b5aa22..8faecad1dbdb9 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -1,5 +1,5 @@ [[query-dsl-text-expansion-query]] -== Text expansion query +=== Text expansion query ++++ Text expansion @@ -12,7 +12,7 @@ The text expansion query uses a {nlp} model to convert the query text into a lis [discrete] [[text-expansion-query-ex-request]] -=== Example request +==== Example request [source,console] ---- @@ -32,14 +32,14 @@ GET _search [discrete] [[text-expansion-query-params]] -=== Top level parameters for `text_expansion` +==== Top level parameters for `text_expansion` ``::: (Required, object) The name of the field that contains the token-weight pairs the NLP model created based on the input text. [discrete] [[text-expansion-rank-feature-field-params]] -=== Top level parameters for `` +==== Top level parameters for `` `model_id`:::: (Required, string) The ID of the model to use to convert the query text into token-weight pairs. @@ -84,7 +84,7 @@ NOTE: The default values for `tokens_freq_ratio_threshold` and `tokens_weight_th [discrete] [[text-expansion-query-example]] -=== Example ELSER query +==== Example ELSER query The following is an example of the `text_expansion` query that references the ELSER model to perform semantic search. For a more detailed description of how to perform semantic search by using ELSER and the `text_expansion` query, refer to <>. @@ -208,7 +208,7 @@ GET my-index/_search [discrete] [[text-expansion-query-with-pruning-config-and-rescore-example]] -=== Example ELSER query with pruning configuration and rescore +==== Example ELSER query with pruning configuration and rescore The following is an extension to the above example that adds a preview:[] pruning configuration to the `text_expansion` query. The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. diff --git a/docs/reference/query-dsl/vector-queries.asciidoc b/docs/reference/query-dsl/vector-queries.asciidoc new file mode 100644 index 0000000000000..fe9f380eeb621 --- /dev/null +++ b/docs/reference/query-dsl/vector-queries.asciidoc @@ -0,0 +1,37 @@ +[[vector-queries]] +== Vector queries + +Vector queries are specialized queries that work on vector fields to efficiently perform <>. + +<>:: +A query that finds the _k_ nearest vectors to a query vector for <> fields, as measured by a similarity metric. + +<>:: +A query used to search <> field types. + +<>:: +A query that allows you to perform semantic search on <> fields. + +[discrete] +=== Deprecated vector queries + +The following queries have been deprecated and will be removed in the near future. +Use the <> query instead. + +<>:: +A query that allows you to perform sparse vector search on <> or <> fields. + +<>:: +Allows to perform text expansion queries optimizing for performance. 
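To make the "similarity metric" mentioned in the kNN description above concrete, here is a purely conceptual sketch of one common choice, cosine similarity over dense float vectors; the actual kNN query uses approximate nearest-neighbour search and is not shown here:

[source,java]
----
final class VectorSimilaritySketch {
    // Conceptual only: scores a stored dense vector by the angle between it and the query vector.
    static double cosineSimilarity(float[] query, float[] doc) {
        double dot = 0, queryNorm = 0, docNorm = 0;
        for (int i = 0; i < query.length; i++) {
            dot += query[i] * doc[i];
            queryNorm += query[i] * query[i];
            docNorm += doc[i] * doc[i];
        }
        return dot / (Math.sqrt(queryNorm) * Math.sqrt(docNorm));
    }
}
----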
+ +include::knn-query.asciidoc[] + +include::sparse-vector-query.asciidoc[] + +include::semantic-query.asciidoc[] + +include::text-expansion-query.asciidoc[] + +include::weighted-tokens-query.asciidoc[] + + From c683f6c4e9d9dcc11353c4bf008ded79c0e5c6ac Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Thu, 11 Jul 2024 14:10:03 +0000 Subject: [PATCH 043/406] Bump versions after 8.14.3 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 6 +++--- .buildkite/pipelines/periodic.yml | 10 +++++----- .ci/bwcVersions | 2 +- .ci/snapshotBwcVersions | 2 +- server/src/main/java/org/elasticsearch/Version.java | 1 + .../resources/org/elasticsearch/TransportVersions.csv | 1 + .../org/elasticsearch/index/IndexVersions.csv | 1 + 8 files changed, 14 insertions(+), 11 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 89df398af0d1d..0f8a861fb81c6 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -62,7 +62,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.23", "8.14.3", "8.15.0"] + BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 173a46b6d600c..311fb8d612ee6 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -577,8 +577,8 @@ steps: env: BWC_VERSION: 8.13.4 - - label: "{{matrix.image}} / 8.14.3 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.3 + - label: "{{matrix.image}} / 8.14.4 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.4 timeout_in_minutes: 300 matrix: setup: @@ -592,7 +592,7 @@ steps: buildDirectory: /dev/shm/bk diskSizeGb: 250 env: - BWC_VERSION: 8.14.3 + BWC_VERSION: 8.14.4 - label: "{{matrix.image}} / 8.15.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 86a61814cf84b..5c6770d946d09 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -642,8 +642,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.14.3 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.14.3#bwcTest + - label: 8.14.4 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.14.4#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -653,7 +653,7 @@ steps: preemptible: true diskSizeGb: 250 env: - BWC_VERSION: 8.14.3 + BWC_VERSION: 8.14.4 retry: automatic: - exit_status: "-1" @@ -751,7 +751,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.23", "8.14.3", "8.15.0"] + BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -801,7 +801,7 @@ steps: - openjdk21 - openjdk22 - openjdk23 - BWC_VERSION: ["7.17.23", "8.14.3", "8.15.0"] + BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index e3b5d9da1ef8a..352cfdfae2d5a 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -31,5 +31,5 @@ BWC_VERSION: - "8.11.4" - "8.12.2" - "8.13.4" - - "8.14.3" + - "8.14.4" - "8.15.0" diff 
--git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 7a7426c3a90a7..8cfa75d3d6bf5 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,4 @@ BWC_VERSION: - "7.17.23" - - "8.14.3" + - "8.14.4" - "8.15.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 9ff7650d19823..b9e81e31037c0 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -179,6 +179,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_14_1 = new Version(8_14_01_99); public static final Version V_8_14_2 = new Version(8_14_02_99); public static final Version V_8_14_3 = new Version(8_14_03_99); + public static final Version V_8_14_4 = new Version(8_14_04_99); public static final Version V_8_15_0 = new Version(8_15_00_99); public static final Version CURRENT = V_8_15_0; diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index 5f1972e30198a..7d2697539fa13 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -124,3 +124,4 @@ 8.14.0,8636001 8.14.1,8636001 8.14.2,8636001 +8.14.3,8636001 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index d1116ddf99ee7..f177ab1468cb2 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -124,3 +124,4 @@ 8.14.0,8505000 8.14.1,8505000 8.14.2,8505000 +8.14.3,8505000 From 110293cc9334a22dfbd568d1712171a54e9f5f5a Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Thu, 11 Jul 2024 14:17:33 +0000 Subject: [PATCH 044/406] Prune changelogs after 8.14.3 release --- docs/changelog/109850.yaml | 5 ----- docs/changelog/109948.yaml | 5 ----- docs/changelog/110268.yaml | 6 ------ docs/changelog/110400.yaml | 5 ----- 4 files changed, 21 deletions(-) delete mode 100644 docs/changelog/109850.yaml delete mode 100644 docs/changelog/109948.yaml delete mode 100644 docs/changelog/110268.yaml delete mode 100644 docs/changelog/110400.yaml diff --git a/docs/changelog/109850.yaml b/docs/changelog/109850.yaml deleted file mode 100644 index 0f11318765aea..0000000000000 --- a/docs/changelog/109850.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109850 -summary: Ensure tasks preserve versions in `MasterService` -area: Cluster Coordination -type: bug -issues: [] diff --git a/docs/changelog/109948.yaml b/docs/changelog/109948.yaml deleted file mode 100644 index 3f5a281781bcf..0000000000000 --- a/docs/changelog/109948.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109948 -summary: Automatically adjust `ignore_malformed` only for the @timestamp -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/110268.yaml b/docs/changelog/110268.yaml deleted file mode 100644 index adfb467f92e8b..0000000000000 --- a/docs/changelog/110268.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110268 -summary: Disallow index.time_series.end_time setting from being set or updated in normal indices -area: TSDB -type: bug -issues: - - 110265 diff --git a/docs/changelog/110400.yaml b/docs/changelog/110400.yaml deleted file mode 100644 index f2810eba214f1..0000000000000 --- a/docs/changelog/110400.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110400 
-summary: Introduce compute listener -area: ES|QL -type: bug -issues: [] From 3ed5cd7dac846935ef29ae8f76b70584bbadb796 Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Thu, 11 Jul 2024 16:28:59 +0200 Subject: [PATCH 045/406] Fix issue with returning incomplete fragment for plain highlighter. (#110707) (#110756) * Fix issue with noMatchSize for plain highlighter * Update docs/changelog/110707.yaml --- docs/changelog/110707.yaml | 5 +++++ .../fetch/subphase/highlight/HighlighterSearchIT.java | 9 +++++++++ .../fetch/subphase/highlight/PlainHighlighter.java | 3 +++ 3 files changed, 17 insertions(+) create mode 100644 docs/changelog/110707.yaml diff --git a/docs/changelog/110707.yaml b/docs/changelog/110707.yaml new file mode 100644 index 0000000000000..e13688c73c743 --- /dev/null +++ b/docs/changelog/110707.yaml @@ -0,0 +1,5 @@ +pr: 110707 +summary: Fix issue with returning incomplete fragment for plain highlighter +area: Highlighting +type: bug +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 0a6fceea9a3f1..d9d6979ffd710 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -2177,6 +2177,15 @@ public void testHighlightNoMatchSize() throws IOException { field.highlighterType("unified"); assertNotHighlighted(prepareSearch("test").highlighter(new HighlightBuilder().field(field)), 0, "text"); + + // Check when the requested fragment size equals the size of the string + var anotherText = "I am unusual and don't end with your regular )token)"; + indexDoc("test", "1", "text", anotherText); + refresh(); + for (String type : new String[] { "plain", "unified", "fvh" }) { + field.highlighterType(type).noMatchSize(anotherText.length()).numOfFragments(0); + assertHighlight(prepareSearch("test").highlighter(new HighlightBuilder().field(field)), 0, "text", 0, 1, equalTo(anotherText)); + } } public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java index e7fa0e67cb453..3d180dd094b18 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -218,6 +218,9 @@ private static int findGoodEndForNoHighlightExcerpt(int noMatchSize, Analyzer an // Can't split on term boundaries without offsets return -1; } + if (contents.length() <= noMatchSize) { + return contents.length(); + } int end = -1; tokenStream.reset(); while (tokenStream.incrementToken()) { From 525c5d2b9df0703a04272a981d22f80753576b22 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Thu, 11 Jul 2024 08:35:21 -0600 Subject: [PATCH 046/406] (Doc+) Include cluster.blocks.* default settings (#110560) (#110760) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 👋 howdy, team! Mini doc update to include default settings for `cluster.blocks.*` settings. 
--- docs/reference/modules/cluster/misc.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 3da5df4f16414..75eaca88c66b1 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -11,12 +11,12 @@ An entire cluster may be set to read-only with the following setting: (<>) Make the whole cluster read only (indices do not accept write operations), metadata is not allowed to be modified (create or delete - indices). + indices). Defaults to `false`. `cluster.blocks.read_only_allow_delete`:: (<>) Identical to `cluster.blocks.read_only` but allows to delete indices - to free up resources. + to free up resources. Defaults to `false`. WARNING: Don't rely on this setting to prevent changes to your cluster. Any user with access to the <> From 5bce26ca744683445a8ed4c061e693ffb6c75d58 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 11 Jul 2024 16:42:17 +0200 Subject: [PATCH 047/406] [CI] Do not cache any es distros when creating ci images (#110742) (#110745) (cherry picked from commit 816cedc21773dad5f0898bc97b01d0ad54f4937c) # Conflicts: # qa/packaging/build.gradle --- qa/packaging/build.gradle | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/qa/packaging/build.gradle b/qa/packaging/build.gradle index 758dfe6661766..d1890e8c49fcf 100644 --- a/qa/packaging/build.gradle +++ b/qa/packaging/build.gradle @@ -36,3 +36,8 @@ tasks.named("test").configure { enabled = false } tasks.register('destructivePackagingTest') { dependsOn 'destructiveDistroTest' } + +tasks.named('resolveAllDependencies') { + // avoid resolving all elasticsearch distros + enabled = false +} From 94d030f907f7a0dbf89e1c42ccd835485ee2788a Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Thu, 11 Jul 2024 16:45:15 +0200 Subject: [PATCH 048/406] Document how to query for a specific feature within rank_features (#110749) (#110775) --- docs/reference/mapping/types/rank-features.asciidoc | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/reference/mapping/types/rank-features.asciidoc b/docs/reference/mapping/types/rank-features.asciidoc index b54e99ede3fae..25d5278ca220d 100644 --- a/docs/reference/mapping/types/rank-features.asciidoc +++ b/docs/reference/mapping/types/rank-features.asciidoc @@ -70,6 +70,15 @@ GET my-index-000001/_search } } } + +GET my-index-000001/_search +{ + "query": { <6> + "term": { + "topics": "economics" + } + } +} -------------------------------------------------- <1> Rank features fields must use the `rank_features` field type @@ -77,6 +86,7 @@ GET my-index-000001/_search <3> Rank features fields must be a hash with string keys and strictly positive numeric values <4> This query ranks documents by how much they are about the "politics" topic. <5> This query ranks documents inversely to the number of "1star" reviews they received. +<6> This query returns documents that store the "economics" feature in the "topics" field. NOTE: `rank_features` fields only support single-valued features and strictly From 0ee975bed06b16996221da7d92f6abe6bf02b93e Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Thu, 11 Jul 2024 10:53:06 -0400 Subject: [PATCH 049/406] [ESQL] Fix parsing of large magnitude negative numbers (#110665) (#110758) Resolves https://github.com/elastic/elasticsearch/issues/104323 This fixes and adds tests for the first of the two bullets in the linked issue. 
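A hedged preview of the boundary check this patch adds to `StringUtils#parseIntegral` (the parsing flow is described in detail just below); the unsigned-long handling is elided and `NumberFormatException` stands in for the project's `InvalidArgumentException`:

[source,java]
----
import java.math.BigInteger;

final class IntegralParsingSketch {
    // Values below Long.MIN_VALUE used to fall through to intValueExact() and surface as
    // ArithmeticException; they are now rejected the same way as oversized positive values,
    // so the caller can fall back to parsing the literal as a double.
    static Number parseIntegral(String text) {
        BigInteger bi = new BigInteger(text);
        if (bi.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
            // the real code first tries an unsigned-long representation here
            throw new NumberFormatException("number [" + text + "] is out of range");
        }
        if (bi.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
            throw new NumberFormatException("magnitude of negative number [" + text + "] is too large");
        }
        if (bi.intValue() == bi.longValue()) { // downsize to int when possible; a ternary would always box to Long
            return bi.intValueExact();
        }
        return bi.longValueExact();
    }
}
----

With that in place, a literal such as `-92233720368547758080` parses as a double rather than failing, which is what the new `parseLargeMagnitudeValues` csv-spec test below exercises.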
`ExpressionBuilder#visitIntegerValue` will attempt to parse a string as an integral value, and return a Literal of the appropriate type. The actual parsing happens in `StringUtils#parseIntegral`. That function has special handling for values that are larger than `Long.MAX_VALUE` where it attempts to turn them into unsigned longs, and if the number is still out of range, throw `InvalidArgumentException`. `ExpressionBuilder` catches that `InvalidArgumentException` and tries to parse a `double` instead. If, on the other hand, the value is smaller than `Long.MIN_VALUE`, `StringUtils` never enters the unsigned long path and just calls `intValueExact`, which throws `ArithmeticException`. This PR solves the issue by catching that `ArithmeticException` and rethrowing it as an `InvalidArgumentException`. --- docs/changelog/110665.yaml | 6 ++++++ .../xpack/esql/core/util/StringUtils.java | 3 +++ .../testFixtures/src/main/resources/floats.csv-spec | 7 +++++++ .../xpack/esql/action/EsqlCapabilities.java | 8 +++++++- .../xpack/esql/parser/StatementParserTests.java | 11 +++++++++++ 5 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/110665.yaml diff --git a/docs/changelog/110665.yaml b/docs/changelog/110665.yaml new file mode 100644 index 0000000000000..fa6db3190fe60 --- /dev/null +++ b/docs/changelog/110665.yaml @@ -0,0 +1,6 @@ +pr: 110665 +summary: "[ESQL] Fix parsing of large magnitude negative numbers" +area: ES|QL +type: bug +issues: + - 104323 diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java index 47246a4e190dd..4ba3658697c0d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java @@ -354,6 +354,9 @@ public static Number parseIntegral(String string) throws InvalidArgumentExceptio } return bi; } + if (bi.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) { + throw new InvalidArgumentException("Magnitude of negative number [{}] is too large", string); + } // try to downsize to int if possible (since that's the most common type) if (bi.intValue() == bi.longValue()) { // ternary operator would always promote to Long return bi.intValueExact(); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index 2ee7f783b7e97..537b69547c6be 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -1,4 +1,11 @@ // Floating point types-specific tests +parseLargeMagnitudeValues +required_capability: fix_parsing_large_negative_numbers +row a = 92233720368547758090, b = -9223372036854775809; + +a:double | b:double +9.223372036854776E+19 | -9.223372036854776E+18 +; inDouble from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height in (2.03, 2.0299999713897705, 2.029296875, 2.0300000000000002) | sort emp_no; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 07362311d37a5..2d3b7255caedd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java 
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -106,7 +106,13 @@ public enum Cap { /** * Support for WEIGHTED_AVG function. */ - AGG_WEIGHTED_AVG; + AGG_WEIGHTED_AVG, + + /** + * Fix a parsing issue where numbers below Long.MIN_VALUE threw an exception instead of parsing as doubles. + * see Parsing large numbers is inconsistent #104323 + */ + FIX_PARSING_LARGE_NEGATIVE_NUMBERS; private final boolean snapshotOnly; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 2e2ca4feafa41..fd046d8dd1cff 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -103,6 +103,17 @@ public void testRowCommandHugeInt() { ); } + public void testRowCommandHugeNegativeInt() { + assertEquals( + new Row(EMPTY, List.of(new Alias(EMPTY, "c", literalDouble(-92233720368547758080d)))), + statement("row c = -92233720368547758080") + ); + assertEquals( + new Row(EMPTY, List.of(new Alias(EMPTY, "c", literalDouble(-18446744073709551616d)))), + statement("row c = -18446744073709551616") + ); + } + public void testRowCommandDouble() { assertEquals(new Row(EMPTY, List.of(new Alias(EMPTY, "c", literalDouble(1.0)))), statement("row c = 1.0")); } From 9092394b19dea9e0f20290a2571a82b1d3610987 Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Thu, 11 Jul 2024 12:01:08 -0400 Subject: [PATCH 050/406] Search coordinator uses event.ingested in cluster state to do rewrites (#110352) (#110782) Min/max range for the event.ingested timestamp field (part of Elastic Common Schema) was added to IndexMetadata in cluster state for searchable snapshots in #106252. This commit modifies the search coordinator to rewrite searches to MatchNone if the query searches a range of event.ingested that, from the min/max range in cluster state, is known to not overlap. This is the same behavior we currently have for the @timestamp field. 
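At its core the coordinator-side check compares the query's time bounds with the per-index [min, max] recorded in cluster state and skips shards whose range cannot overlap; the real logic lives in `CoordinatorRewriteContext` and `RangeQueryBuilder#getRelation` in the diff below. A minimal sketch of the decision, with illustrative types and inclusive bounds (open-ended bounds, date formats and time zones are ignored):

[source,java]
----
final class CoordinatorRewriteSketch {
    record IndexTimeRange(long min, long max) {}

    // A shard can be rewritten to "match none" (and skipped) when the queried range on
    // @timestamp or event.ingested is disjoint from the range the index reports.
    static boolean canSkipShard(IndexTimeRange indexRange, long queryFrom, long queryTo) {
        return queryTo < indexRange.min() || queryFrom > indexRange.max();
    }
}
----

When a request filters on both fields, it is enough for either filter to be disjoint for the shard to be skipped, which is what the new can-match tests below assert.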
--- docs/changelog/110352.yaml | 5 + .../TimestampFieldMapperServiceTests.java | 4 +- .../query/CoordinatorRewriteContext.java | 113 ++++- .../CoordinatorRewriteContextProvider.java | 30 +- .../index/query/RangeQueryBuilder.java | 6 +- .../indices/DateFieldRangeInfo.java | 51 +++ .../elasticsearch/indices/IndicesService.java | 19 +- .../indices/TimestampFieldMapperService.java | 56 ++- .../CanMatchPreFilterSearchPhaseTests.java | 340 ++++++++++++--- .../test/AbstractBuilderTestCase.java | 11 +- .../index/engine/frozen/FrozenIndexIT.java | 163 ++++++- ...pshotsCanMatchOnCoordinatorIntegTests.java | 409 ++++++++++++++++-- 12 files changed, 1034 insertions(+), 173 deletions(-) create mode 100644 docs/changelog/110352.yaml create mode 100644 server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java diff --git a/docs/changelog/110352.yaml b/docs/changelog/110352.yaml new file mode 100644 index 0000000000000..7dad1ce5f6dd4 --- /dev/null +++ b/docs/changelog/110352.yaml @@ -0,0 +1,5 @@ +pr: 110352 +summary: Search coordinator uses `event.ingested` in cluster state to do rewrites +area: Search +type: enhancement +issues: [] diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java index 97959fa385241..eb35c44d30331 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java @@ -61,7 +61,7 @@ public void testGetTimestampFieldTypeForTsdbDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); assertThat(result, notNullValue()); } @@ -70,7 +70,7 @@ public void testGetTimestampFieldTypeForDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); assertThat(result, nullValue()); } diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index 2a1062f8876d2..f2fc7c1bd6cd0 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -9,11 +9,14 @@ package org.elasticsearch.index.query; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.Collections; @@ 
-23,19 +26,24 @@ * Context object used to rewrite {@link QueryBuilder} instances into simplified version in the coordinator. * Instances of this object rely on information stored in the {@code IndexMetadata} for certain indices. * Right now this context object is able to rewrite range queries that include a known timestamp field - * (i.e. the timestamp field for DataStreams) into a MatchNoneQueryBuilder and skip the shards that - * don't hold queried data. See IndexMetadata#getTimestampRange() for more details + * (i.e. the timestamp field for DataStreams or the 'event.ingested' field in ECS) into a MatchNoneQueryBuilder + * and skip the shards that don't hold queried data. See IndexMetadata for more details. */ public class CoordinatorRewriteContext extends QueryRewriteContext { - private final IndexLongFieldRange indexLongFieldRange; - private final DateFieldMapper.DateFieldType timestampFieldType; + private final DateFieldRangeInfo dateFieldRangeInfo; + /** + * Context for coordinator search rewrites based on time ranges for the @timestamp field and/or 'event.ingested' field + * @param parserConfig + * @param client + * @param nowInMillis + * @param dateFieldRangeInfo range and field type info for @timestamp and 'event.ingested' + */ public CoordinatorRewriteContext( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, - IndexLongFieldRange indexLongFieldRange, - DateFieldMapper.DateFieldType timestampFieldType + DateFieldRangeInfo dateFieldRangeInfo ) { super( parserConfig, @@ -53,29 +61,98 @@ public CoordinatorRewriteContext( null, null ); - this.indexLongFieldRange = indexLongFieldRange; - this.timestampFieldType = timestampFieldType; + this.dateFieldRangeInfo = dateFieldRangeInfo; } - long getMinTimestamp() { - return indexLongFieldRange.getMin(); + /** + * Get min timestamp for either '@timestamp' or 'event.ingested' fields. Any other field + * passed in will cause an {@link IllegalArgumentException} to be thrown, as these are the only + * two fields supported for coordinator rewrites (based on time range). + * @param fieldName Must be DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return min timestamp for the field from IndexMetadata in cluster state. + */ + long getMinTimestamp(String fieldName) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getTimestampRange().getMin(); + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getEventIngestedRange().getMin(); + } else { + throw new IllegalArgumentException( + Strings.format( + "Only [%s] or [%s] fields are supported for min timestamp coordinator rewrites, but got: [%s]", + DataStream.TIMESTAMP_FIELD_NAME, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + fieldName + ) + ); + } } - long getMaxTimestamp() { - return indexLongFieldRange.getMax(); + /** + * Get max timestamp for either '@timestamp' or 'event.ingested' fields. Any other field + * passed in will cause an {@link IllegalArgumentException} to be thrown, as these are the only + * two fields supported for coordinator rewrites (based on time range). + * @param fieldName Must be DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return max timestamp for the field from IndexMetadata in cluster state. 
+ */ + long getMaxTimestamp(String fieldName) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getTimestampRange().getMax(); + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getEventIngestedRange().getMax(); + } else { + throw new IllegalArgumentException( + Strings.format( + "Only [%s] or [%s] fields are supported for max timestamp coordinator rewrites, but got: [%s]", + DataStream.TIMESTAMP_FIELD_NAME, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + fieldName + ) + ); + } } - boolean hasTimestampData() { - return indexLongFieldRange.isComplete() && indexLongFieldRange != IndexLongFieldRange.EMPTY; + /** + * Determine whether either '@timestamp' or 'event.ingested' fields has useful timestamp ranges + * stored in cluster state for this context. + * Any other fieldname will cause an {@link IllegalArgumentException} to be thrown, as these are the only + * two fields supported for coordinator rewrites (based on time range). + * @param fieldName Must be DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return min timestamp for the field from IndexMetadata in cluster state. + */ + boolean hasTimestampData(String fieldName) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getTimestampRange().isComplete() + && dateFieldRangeInfo.getTimestampRange() != IndexLongFieldRange.EMPTY; + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getEventIngestedRange().isComplete() + && dateFieldRangeInfo.getEventIngestedRange() != IndexLongFieldRange.EMPTY; + } else { + throw new IllegalArgumentException( + Strings.format( + "Only [%s] or [%s] fields are supported for min/max timestamp coordinator rewrites, but got: [%s]", + DataStream.TIMESTAMP_FIELD_NAME, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + fieldName + ) + ); + } } + /** + * @param fieldName Get MappedFieldType for either '@timestamp' or 'event.ingested' fields. + * @return min timestamp for the field from IndexMetadata in cluster state or null if fieldName was not + * DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME. 
+ */ @Nullable public MappedFieldType getFieldType(String fieldName) { - if (fieldName.equals(timestampFieldType.name()) == false) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getTimestampFieldType(); + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getEventIngestedFieldType(); + } else { return null; } - - return timestampFieldType; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java index e44861b4afe8a..8251b82c05af2 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java @@ -14,6 +14,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.function.Function; @@ -25,14 +26,14 @@ public class CoordinatorRewriteContextProvider { private final Client client; private final LongSupplier nowInMillis; private final Supplier clusterStateSupplier; - private final Function mappingSupplier; + private final Function mappingSupplier; public CoordinatorRewriteContextProvider( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, Supplier clusterStateSupplier, - Function mappingSupplier + Function mappingSupplier ) { this.parserConfig = parserConfig; this.client = client; @@ -49,18 +50,33 @@ public CoordinatorRewriteContext getCoordinatorRewriteContext(Index index) { if (indexMetadata == null) { return null; } - DateFieldMapper.DateFieldType dateFieldType = mappingSupplier.apply(index); - if (dateFieldType == null) { + + DateFieldRangeInfo dateFieldRangeInfo = mappingSupplier.apply(index); + if (dateFieldRangeInfo == null) { return null; } + + DateFieldMapper.DateFieldType timestampFieldType = dateFieldRangeInfo.getTimestampFieldType(); + DateFieldMapper.DateFieldType eventIngestedFieldType = dateFieldRangeInfo.getEventIngestedFieldType(); IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); + IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + if (timestampRange.containsAllShardRanges() == false) { - timestampRange = indexMetadata.getTimeSeriesTimestampRange(dateFieldType); - if (timestampRange == null) { + // if @timestamp range is not present or not ready in cluster state, fallback to using time series range (if present) + timestampRange = indexMetadata.getTimeSeriesTimestampRange(timestampFieldType); + // if timestampRange in the time series is null AND the eventIngestedRange is not ready for use, return null (no coord rewrite) + if (timestampRange == null && eventIngestedRange.containsAllShardRanges() == false) { return null; } } - return new CoordinatorRewriteContext(parserConfig, client, nowInMillis, timestampRange, dateFieldType); + // the DateFieldRangeInfo from the mappingSupplier only has field types, but not ranges + // so create a new object with ranges pulled from cluster state + return new CoordinatorRewriteContext( + parserConfig, + client, + nowInMillis, + new DateFieldRangeInfo(timestampFieldType, timestampRange, eventIngestedFieldType, eventIngestedRange) + ); } } diff --git 
a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 4d2a6d3eaecdb..ac7fae8ec0145 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -436,11 +436,11 @@ public String getWriteableName() { protected MappedFieldType.Relation getRelation(final CoordinatorRewriteContext coordinatorRewriteContext) { final MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(fieldName); if (fieldType instanceof final DateFieldMapper.DateFieldType dateFieldType) { - if (coordinatorRewriteContext.hasTimestampData() == false) { + if (coordinatorRewriteContext.hasTimestampData(fieldName) == false) { return MappedFieldType.Relation.DISJOINT; } - long minTimestamp = coordinatorRewriteContext.getMinTimestamp(); - long maxTimestamp = coordinatorRewriteContext.getMaxTimestamp(); + long minTimestamp = coordinatorRewriteContext.getMinTimestamp(fieldName); + long maxTimestamp = coordinatorRewriteContext.getMaxTimestamp(fieldName); DateMathParser dateMathParser = getForceDateParser(); return dateFieldType.isFieldWithinQuery( minTimestamp, diff --git a/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java b/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java new file mode 100644 index 0000000000000..ddeb3f370be12 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.shard.IndexLongFieldRange; + +/** + * Data holder of timestamp fields held in cluster state IndexMetadata. 
+ */ +public final class DateFieldRangeInfo { + + private final DateFieldMapper.DateFieldType timestampFieldType; + private final IndexLongFieldRange timestampRange; + private final DateFieldMapper.DateFieldType eventIngestedFieldType; + private final IndexLongFieldRange eventIngestedRange; + + public DateFieldRangeInfo( + DateFieldMapper.DateFieldType timestampFieldType, + IndexLongFieldRange timestampRange, + DateFieldMapper.DateFieldType eventIngestedFieldType, + IndexLongFieldRange eventIngestedRange + ) { + this.timestampFieldType = timestampFieldType; + this.timestampRange = timestampRange; + this.eventIngestedFieldType = eventIngestedFieldType; + this.eventIngestedRange = eventIngestedRange; + } + + public DateFieldMapper.DateFieldType getTimestampFieldType() { + return timestampFieldType; + } + + public IndexLongFieldRange getTimestampRange() { + return timestampRange; + } + + public DateFieldMapper.DateFieldType getEventIngestedFieldType() { + return eventIngestedFieldType; + } + + public IndexLongFieldRange getEventIngestedRange() { + return eventIngestedRange; + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 0d81d24e64646..203d7d5a0aba8 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -98,7 +98,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; @@ -1764,7 +1763,13 @@ public DataRewriteContext getDataRewriteContext(LongSupplier nowInMillis) { } public CoordinatorRewriteContextProvider getCoordinatorRewriteContextProvider(LongSupplier nowInMillis) { - return new CoordinatorRewriteContextProvider(parserConfig, client, nowInMillis, clusterService::state, this::getTimestampFieldType); + return new CoordinatorRewriteContextProvider( + parserConfig, + client, + nowInMillis, + clusterService::state, + this::getTimestampFieldTypeInfo + ); } /** @@ -1854,14 +1859,16 @@ public boolean allPendingDanglingIndicesWritten() { } /** - * @return the field type of the {@code @timestamp} field of the given index, or {@code null} if: + * @return DateFieldRangeInfo holding the field types of the {@code @timestamp} and {@code event.ingested} fields of the index. + * or {@code null} if: * - the index is not found, * - the field is not found, or - * - the field is not a timestamp field. + * - the mapping is not known yet, or + * - the index does not have a useful timestamp field. 
*/ @Nullable - public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { - return timestampFieldMapperService.getTimestampFieldType(index); + public DateFieldRangeInfo getTimestampFieldTypeInfo(Index index) { + return timestampFieldMapperService.getTimestampFieldTypeMap(index); } public IndexScopedSettings getIndexScopedSettings() { diff --git a/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java b/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java index 4caeaef6514e5..9b23762e29490 100644 --- a/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java +++ b/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java @@ -42,8 +42,9 @@ import static org.elasticsearch.core.Strings.format; /** - * Tracks the mapping of the {@code @timestamp} field of immutable indices that expose their timestamp range in their index metadata. - * Coordinating nodes do not have (easy) access to mappings for all indices, so we extract the type of this one field from the mapping here. + * Tracks the mapping of the '@timestamp' and 'event.ingested' fields of immutable indices that expose their timestamp range in their + * index metadata. Coordinating nodes do not have (easy) access to mappings for all indices, so we extract the type of these two fields + * from the mapping here, since timestamp fields can have millis or nanos level resolution. */ public class TimestampFieldMapperService extends AbstractLifecycleComponent implements ClusterStateApplier { @@ -53,10 +54,12 @@ public class TimestampFieldMapperService extends AbstractLifecycleComponent impl private final ExecutorService executor; // single thread to construct mapper services async as needed /** - * The type of the {@code @timestamp} field keyed by index. Futures may be completed with {@code null} to indicate that there is - * no usable {@code @timestamp} field. + * The type of the 'event.ingested' and/or '@timestamp' fields keyed by index. + * The inner map is keyed by field name ('@timestamp' or 'event.ingested'). + * Futures may be completed with {@code null} to indicate that there is + * no usable timestamp field. 
*/ - private final Map> fieldTypesByIndex = ConcurrentCollections.newConcurrentMap(); + private final Map> fieldTypesByIndex = ConcurrentCollections.newConcurrentMap(); public TimestampFieldMapperService(Settings settings, ThreadPool threadPool, IndicesService indicesService) { this.indicesService = indicesService; @@ -102,8 +105,8 @@ public void applyClusterState(ClusterChangedEvent event) { final Index index = indexMetadata.getIndex(); if (hasUsefulTimestampField(indexMetadata) && fieldTypesByIndex.containsKey(index) == false) { - logger.trace("computing timestamp mapping for {}", index); - final PlainActionFuture future = new PlainActionFuture<>(); + logger.trace("computing timestamp mapping(s) for {}", index); + final PlainActionFuture future = new PlainActionFuture<>(); fieldTypesByIndex.put(index, future); final IndexService indexService = indicesService.indexService(index); @@ -148,29 +151,45 @@ private static boolean hasUsefulTimestampField(IndexMetadata indexMetadata) { return true; } - final IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); - return timestampRange.isComplete() && timestampRange != IndexLongFieldRange.UNKNOWN; + IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); + if (timestampRange.isComplete() && timestampRange != IndexLongFieldRange.UNKNOWN) { + return true; + } + + IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + return eventIngestedRange.isComplete() && eventIngestedRange != IndexLongFieldRange.UNKNOWN; } - private static DateFieldMapper.DateFieldType fromMapperService(MapperService mapperService) { - final MappedFieldType mappedFieldType = mapperService.fieldType(DataStream.TIMESTAMP_FIELD_NAME); - if (mappedFieldType instanceof DateFieldMapper.DateFieldType) { - return (DateFieldMapper.DateFieldType) mappedFieldType; - } else { + private static DateFieldRangeInfo fromMapperService(MapperService mapperService) { + DateFieldMapper.DateFieldType timestampFieldType = null; + DateFieldMapper.DateFieldType eventIngestedFieldType = null; + + MappedFieldType mappedFieldType = mapperService.fieldType(DataStream.TIMESTAMP_FIELD_NAME); + if (mappedFieldType instanceof DateFieldMapper.DateFieldType dateFieldType) { + timestampFieldType = dateFieldType; + } + mappedFieldType = mapperService.fieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + if (mappedFieldType instanceof DateFieldMapper.DateFieldType dateFieldType) { + eventIngestedFieldType = dateFieldType; + } + if (timestampFieldType == null && eventIngestedFieldType == null) { return null; } + // the mapper only fills in the field types, not the actual range values + return new DateFieldRangeInfo(timestampFieldType, null, eventIngestedFieldType, null); } /** - * @return the field type of the {@code @timestamp} field of the given index, or {@code null} if: + * @return DateFieldRangeInfo holding the field types of the {@code @timestamp} and {@code event.ingested} fields of the index. + * or {@code null} if: * - the index is not found, * - the field is not found, * - the mapping is not known yet, or - * - the field is not a timestamp field. + * - the index does not have a useful timestamp field. 
*/ @Nullable - public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { - final PlainActionFuture future = fieldTypesByIndex.get(index); + public DateFieldRangeInfo getTimestampFieldTypeMap(Index index) { + final PlainActionFuture future = fieldTypesByIndex.get(index); if (future == null || future.isDone() == false) { return null; } @@ -181,5 +200,4 @@ public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { throw new UncategorizedExecutionException("An error occurred fetching timestamp field type for " + index, e); } } - } diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 70c4d73f578b3..e61d86bbf2a58 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.CanMatchNodeResponse.ResponseOrFailure; @@ -26,8 +27,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -38,6 +37,7 @@ import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.search.CanMatchShardResponse; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder; @@ -72,6 +72,7 @@ import static org.elasticsearch.action.search.SearchAsyncActionTests.getShardsIter; import static org.elasticsearch.core.Types.forciblyCast; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Mockito.mock; @@ -464,7 +465,17 @@ public void sendCanMatch( } } - public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exception { + // test using @timestamp + public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingTimestamp() throws Exception { + doCanMatchFilteringOnCoordinatorThatCanBeSkipped(DataStream.TIMESTAMP_FIELD_NAME); + } + + // test using event.ingested + public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingEventIngested() throws Exception { + doCanMatchFilteringOnCoordinatorThatCanBeSkipped(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + } + + public void doCanMatchFilteringOnCoordinatorThatCanBeSkipped(String timestampField) throws Exception { Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); @@ -475,15 +486,10 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws 
Exceptio long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( - dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, - indexMinTimestamp, - indexMaxTimestamp - ); + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timestampField, indexMinTimestamp, indexMaxTimestamp); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timestampField); // We query a range outside of the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); @@ -535,26 +541,107 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exceptio ); } - public void testCanMatchFilteringOnCoordinatorParsingFails() throws Exception { - Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); - Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + public void testCoordinatorCanMatchFilteringThatCanBeSkippedUsingBothTimestamps() throws Exception { + Index dataStreamIndex1 = new Index(".ds-twoTimestamps0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-twoTimestamps0002", UUIDs.base64UUID()); DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); - List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + List regularIndices = randomList(1, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); long indexMinTimestamp = randomLongBetween(0, 5000); long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( + // use same range for both @timestamp and event.ingested + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested( dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, + indexMinTimestamp, + indexMaxTimestamp, indexMinTimestamp, indexMaxTimestamp ); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + /** + * Expected behavior: if either @timestamp or 'event.ingested' filters in the query are "out of range" (do not + * overlap the range in cluster state), then all shards in the datastream should be skipped. + * Only if both @timestamp or 'event.ingested' filters are "in range" should the data stream shards be searched + */ + boolean timestampQueryOutOfRange = randomBoolean(); + boolean eventIngestedQueryOutOfRange = randomBoolean(); + int timestampOffset = timestampQueryOutOfRange ? 1 : -500; + int eventIngestedOffset = eventIngestedQueryOutOfRange ? 
1 : -500; + + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + tsRangeQueryBuilder.from(indexMaxTimestamp + timestampOffset).to(indexMaxTimestamp + 2); + + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + eventIngestedRangeQueryBuilder.from(indexMaxTimestamp + eventIngestedOffset).to(indexMaxTimestamp + 2); + + BoolQueryBuilder queryBuilder = new BoolQueryBuilder().filter(tsRangeQueryBuilder).filter(eventIngestedRangeQueryBuilder); + + if (randomBoolean()) { + // Add an additional filter that cannot be evaluated in the coordinator but shouldn't + // affect the end result as we're filtering + queryBuilder.filter(new TermQueryBuilder("fake", "value")); + } + + assignShardsAndExecuteCanMatchPhase( + List.of(dataStream), + regularIndices, + contextProviderBuilder.build(), + queryBuilder, + List.of(), + null, + (updatedSearchShardIterators, requests) -> { + List skippedShards = updatedSearchShardIterators.stream().filter(SearchShardIterator::skip).toList(); + List nonSkippedShards = updatedSearchShardIterators.stream() + .filter(searchShardIterator -> searchShardIterator.skip() == false) + .toList(); + + if (timestampQueryOutOfRange || eventIngestedQueryOutOfRange) { + // data stream shards should have been skipped + assertThat(skippedShards.size(), greaterThan(0)); + boolean allSkippedShardAreFromDataStream = skippedShards.stream() + .allMatch(shardIterator -> dataStream.getIndices().contains(shardIterator.shardId().getIndex())); + assertThat(allSkippedShardAreFromDataStream, equalTo(true)); + + boolean allNonSkippedShardsAreFromRegularIndices = nonSkippedShards.stream() + .allMatch(shardIterator -> regularIndices.contains(shardIterator.shardId().getIndex())); + assertThat(allNonSkippedShardsAreFromRegularIndices, equalTo(true)); + + boolean allRequestsWereTriggeredAgainstRegularIndices = requests.stream() + .allMatch(request -> regularIndices.contains(request.shardId().getIndex())); + assertThat(allRequestsWereTriggeredAgainstRegularIndices, equalTo(true)); + + } else { + assertThat(skippedShards.size(), equalTo(0)); + long countSkippedShardsFromDatastream = nonSkippedShards.stream() + .filter(iter -> dataStream.getIndices().contains(iter.shardId().getIndex())) + .count(); + assertThat(countSkippedShardsFromDatastream, greaterThan(0L)); + } + } + ); + } + + public void testCanMatchFilteringOnCoordinatorParsingFails() throws Exception { + Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); + + List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + + long indexMinTimestamp = randomLongBetween(0, 5000); + long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + for (Index dataStreamIndex : dataStream.getIndices()) { + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timeField, indexMinTimestamp, indexMaxTimestamp); + } + + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField); // Query with a non default date format 
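        // The coordinator-side rewrite presumably has to parse these bounds with the field's
        // mapped date format before it can compare them against the range kept in cluster state;
        // "2020-1-01" is not a valid strict date, so the rewrite cannot be applied and the shards
        // should not be skipped, which is the situation this test exercises.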
rangeQueryBuilder.from("2020-1-01").to("2021-1-01"); @@ -585,23 +672,20 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + long indexMinTimestamp = 10; long indexMaxTimestamp = 20; StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( - dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, - indexMinTimestamp, - indexMaxTimestamp - ); + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timeField, indexMinTimestamp, indexMaxTimestamp); } BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); // Query inside of the data stream index range if (randomBoolean()) { // Query generation - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField); // We query a range within the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMinTimestamp).to(indexMaxTimestamp); @@ -614,8 +698,7 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep } } else { // We query a range outside of the timestamp range covered by both datastream indices - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(indexMaxTimestamp + 1) - .to(indexMaxTimestamp + 2); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField).from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); TermQueryBuilder termQueryBuilder = new TermQueryBuilder("fake", "value"); @@ -635,17 +718,86 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep ); } + public void testCanMatchFilteringOnCoordinatorWithTimestampAndEventIngestedThatCanNotBeSkipped() throws Exception { + // Generate indices + Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); + + List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + + long indexMinTimestampForTs = 10; + long indexMaxTimestampForTs = 20; + long indexMinTimestampForEventIngested = 10; + long indexMaxTimestampForEventIngested = 20; + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + for (Index dataStreamIndex : dataStream.getIndices()) { + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested( + dataStreamIndex, + indexMinTimestampForTs, + indexMaxTimestampForTs, + indexMinTimestampForEventIngested, + indexMaxTimestampForEventIngested + ); + } + + BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); + // Query inside of the data stream index range + if (randomBoolean()) { + // Query generation + // We query a range within both timestamp ranges covered by both datastream indices + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + tsRangeQueryBuilder.from(indexMinTimestampForTs).to(indexMaxTimestampForTs); + + RangeQueryBuilder 
eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + eventIngestedRangeQueryBuilder.from(indexMinTimestampForEventIngested).to(indexMaxTimestampForEventIngested); + + queryBuilder.filter(tsRangeQueryBuilder).filter(eventIngestedRangeQueryBuilder); + + if (randomBoolean()) { + // Add an additional filter that cannot be evaluated in the coordinator but shouldn't + // affect the end result as we're filtering + queryBuilder.filter(new TermQueryBuilder("fake", "value")); + } + } else { + // We query a range outside of the both ranges covered by both datastream indices + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(indexMaxTimestampForTs + 1) + .to(indexMaxTimestampForTs + 2); + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME).from( + indexMaxTimestampForEventIngested + 1 + ).to(indexMaxTimestampForEventIngested + 2); + + TermQueryBuilder termQueryBuilder = new TermQueryBuilder("fake", "value"); + + // This is always evaluated as true in the coordinator as we cannot determine there if + // the term query clause is false. + queryBuilder.should(tsRangeQueryBuilder).should(eventIngestedRangeQueryBuilder).should(termQueryBuilder); + } + + assignShardsAndExecuteCanMatchPhase( + List.of(dataStream), + regularIndices, + contextProviderBuilder.build(), + queryBuilder, + List.of(), + null, + this::assertAllShardsAreQueried + ); + } + public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withDefaultBackgroundFilter() throws Exception { Index index1 = new Index("index1", UUIDs.base64UUID()); Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timeField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timeField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timeField, 2000, 2999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2100).to(2200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timeField).from(2100).to(2200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms"); assignShardsAndExecuteCanMatchPhase( @@ -661,20 +813,22 @@ public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_w } public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withBackgroundFilter() throws Exception { + String timestampField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + Index index1 = new Index("index1", UUIDs.base64UUID()); Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); Index index4 = new Index("index4", UUIDs.base64UUID()); 
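        // Illustrative sketch only, not the production Elasticsearch code: the per-index skip
        // decision exercised by these significant_terms tests reduces to interval-overlap checks
        // against the min/max recorded in cluster state. With a background filter, an index is a
        // skip candidate only if it overlaps neither the query range nor the background range.
        // For the values used below (query 3100..3200, background filter 0..1999) that singles out
        // index3 (2000..2999):
        long sketchQueryFrom = 3100, sketchQueryTo = 3200;
        long sketchBackgroundFrom = 0, sketchBackgroundTo = 1999;
        long sketchIndexMin = 2000, sketchIndexMax = 2999; // index3 in this test
        boolean overlapsQuery = sketchQueryFrom <= sketchIndexMax && sketchQueryTo >= sketchIndexMin;          // false
        boolean overlapsBackground = sketchBackgroundFrom <= sketchIndexMax && sketchBackgroundTo >= sketchIndexMin; // false
        boolean skipCandidate = overlapsQuery == false && overlapsBackground == false;                         // true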
StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); - contextProviderBuilder.addIndexMinMaxTimestamps(index4, DataStream.TIMESTAMP_FIELD_NAME, 3000, 3999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timestampField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timestampField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timestampField, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index4, timestampField, 3000, 3999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(3100).to(3200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timestampField).from(3100).to(3200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(0).to(1999) + new RangeQueryBuilder(timestampField).from(0).to(1999) ); assignShardsAndExecuteCanMatchPhase( @@ -703,14 +857,53 @@ public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_w Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); + String timestampField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timestampField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timestampField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timestampField, 2000, 2999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2100).to(2200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timestampField).from(2100).to(2200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2000).to(2300) + new RangeQueryBuilder(timestampField).from(2000).to(2300) + ); + SuggestBuilder suggest = new SuggestBuilder().setGlobalText("test"); + + assignShardsAndExecuteCanMatchPhase( + List.of(), + List.of(index1, index2, index3), + contextProviderBuilder.build(), + query, + List.of(aggregation), + suggest, + // The query and aggregation and match only index3, but suggest should match everything. 
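            // A global suggest cannot be pre-filtered by a time range on the coordinator, so even
            // though only index3 overlaps the query and the background filter, every shard still
            // has to be queried, hence the assertAllShardsAreQueried reference below.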
+ this::assertAllShardsAreQueried + ); + } + + public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withSuggest_withTwoTimestamps() throws Exception { + Index index1 = new Index("index1", UUIDs.base64UUID()); + Index index2 = new Index("index2", UUIDs.base64UUID()); + Index index3 = new Index("index3", UUIDs.base64UUID()); + + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index1, 0, 999, 0, 999); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index2, 1000, 1999, 1000, 1999); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index3, 2000, 2999, 2000, 2999); + + String fieldInRange = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + String fieldOutOfRange = DataStream.TIMESTAMP_FIELD_NAME; + + if (randomBoolean()) { + fieldInRange = DataStream.TIMESTAMP_FIELD_NAME; + fieldOutOfRange = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + } + + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(fieldInRange).from(2100).to(2200)) + .filter(new RangeQueryBuilder(fieldOutOfRange).from(8888).to(9999)); + AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( + new RangeQueryBuilder(fieldInRange).from(2000).to(2300) ); SuggestBuilder suggest = new SuggestBuilder().setGlobalText("test"); @@ -744,13 +937,13 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedTsdb() throws Exce long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index index : dataStream1.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps(index, indexMinTimestamp, indexMaxTimestamp); + contextProviderBuilder.addIndexMinMaxTimestamps(index, DataStream.TIMESTAMP_FIELD_NAME, indexMinTimestamp, indexMaxTimestamp); } for (Index index : dataStream2.getIndices()) { contextProviderBuilder.addIndex(index); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp"); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); // We query a range outside of the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); @@ -954,9 +1147,9 @@ public void sendCanMatch( canMatchResultsConsumer.accept(updatedSearchShardIterators, requests); } - private static class StaticCoordinatorRewriteContextProviderBuilder { + static class StaticCoordinatorRewriteContextProviderBuilder { private ClusterState clusterState = ClusterState.EMPTY_STATE; - private final Map fields = new HashMap<>(); + private final Map fields = new HashMap<>(); private void addIndexMinMaxTimestamps(Index index, String fieldName, long minTimeStamp, long maxTimestamp) { if (clusterState.metadata().index(index) != null) { @@ -974,35 +1167,64 @@ private void addIndexMinMaxTimestamps(Index index, String fieldName, long minTim IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) .settings(indexSettings) .numberOfShards(1) - .numberOfReplicas(0) - .timestampRange(timestampRange); + .numberOfReplicas(0); + if (fieldName.equals(DataStream.TIMESTAMP_FIELD_NAME)) { + indexMetadataBuilder.timestampRange(timestampRange); + fields.put(index, new DateFieldRangeInfo(new 
DateFieldMapper.DateFieldType(fieldName), null, null, null)); + } else if (fieldName.equals(IndexMetadata.EVENT_INGESTED_FIELD_NAME)) { + indexMetadataBuilder.eventIngestedRange(timestampRange, TransportVersion.current()); + fields.put(index, new DateFieldRangeInfo(null, null, new DateFieldMapper.DateFieldType(fieldName), null)); + } Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); - clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - - fields.put(index, new DateFieldMapper.DateFieldType(fieldName)); } - private void addIndexMinMaxTimestamps(Index index, long minTimestamp, long maxTimestamp) { + /** + * Add min/max timestamps to IndexMetadata for the specified index for both @timestamp and 'event.ingested' + */ + private void addIndexMinMaxForTimestampAndEventIngested( + Index index, + long minTimestampForTs, + long maxTimestampForTs, + long minTimestampForEventIngested, + long maxTimestampForEventIngested + ) { if (clusterState.metadata().index(index) != null) { throw new IllegalArgumentException("Min/Max timestamps for " + index + " were already defined"); } - Settings.Builder indexSettings = settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "a_field") - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(minTimestamp)) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(maxTimestamp)); + IndexLongFieldRange tsTimestampRange = IndexLongFieldRange.NO_SHARDS.extendWithShardRange( + 0, + 1, + ShardLongFieldRange.of(minTimestampForTs, maxTimestampForTs) + ); + IndexLongFieldRange eventIngestedTimestampRange = IndexLongFieldRange.NO_SHARDS.extendWithShardRange( + 0, + 1, + ShardLongFieldRange.of(minTimestampForEventIngested, maxTimestampForEventIngested) + ); + + Settings.Builder indexSettings = settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) .settings(indexSettings) .numberOfShards(1) - .numberOfReplicas(0); + .numberOfReplicas(0) + .timestampRange(tsTimestampRange) + .eventIngestedRange(eventIngestedTimestampRange, TransportVersion.current()); Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - fields.put(index, new DateFieldMapper.DateFieldType("@timestamp")); + fields.put( + index, + new DateFieldRangeInfo( + new DateFieldMapper.DateFieldType(DataStream.TIMESTAMP_FIELD_NAME), + null, + new DateFieldMapper.DateFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), + null + ) + ); } private void addIndex(Index index) { @@ -1018,7 +1240,7 @@ private void addIndex(Index index) { Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - fields.put(index, new DateFieldMapper.DateFieldType("@timestamp")); + fields.put(index, new DateFieldRangeInfo(new DateFieldMapper.DateFieldType(DataStream.TIMESTAMP_FIELD_NAME), null, null, null)); } public CoordinatorRewriteContextProvider build() { diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 271df2a971fb1..a2d93bab3a505 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -59,6 +59,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -622,13 +623,13 @@ QueryRewriteContext createQueryRewriteContext() { } CoordinatorRewriteContext createCoordinatorContext(DateFieldMapper.DateFieldType dateFieldType, long min, long max) { - return new CoordinatorRewriteContext( - parserConfiguration, - this.client, - () -> nowInMillis, + DateFieldRangeInfo timestampFieldInfo = new DateFieldRangeInfo( + dateFieldType, IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)), - dateFieldType + dateFieldType, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)) ); + return new CoordinatorRewriteContext(parserConfiguration, this.client, () -> nowInMillis, timestampFieldInfo); } DataRewriteContext createDataContext() { diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java index 36d4751423113..6d962ec5baceb 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; @@ -44,6 +45,7 @@ import java.time.Instant; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING; @@ -76,8 +78,15 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx createIndex("index", 1, 1); - final DocWriteResponse indexResponse = prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567Z") - .get(); + String timestampVal = "2010-01-06T02:03:04.567Z"; + String eventIngestedVal = "2010-01-06T02:03:05.567Z"; // one second later + + final DocWriteResponse indexResponse = prepareIndex("index").setSource( + DataStream.TIMESTAMP_FIELD_NAME, + timestampVal, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + eventIngestedVal + ).get(); ensureGreen("index"); @@ -117,13 +126,23 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); 
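        // IndexLongFieldRange sentinels, as these tests use them: UNKNOWN means no usable range is
        // known for the field, EMPTY means the range is tracked but no document carries the field,
        // and NO_SHARDS means no shard has reported a range yet. Only a complete range with a real
        // min/max allows the coordinator to skip shards.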
assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); assertTrue(timestampFieldRange.isComplete()); - assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); - assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); + assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse(timestampVal).toEpochMilli())); + assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse(timestampVal).toEpochMilli())); - assertThat(indexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + IndexLongFieldRange eventIngestedFieldRange = clusterAdmin().prepareState() + .get() + .getState() + .metadata() + .index("index") + .getEventIngestedRange(); + assertThat(eventIngestedFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); + assertThat(eventIngestedFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertTrue(eventIngestedFieldRange.isComplete()); + assertThat(eventIngestedFieldRange.getMin(), equalTo(Instant.parse(eventIngestedVal).toEpochMilli())); + assertThat(eventIngestedFieldRange.getMax(), equalTo(Instant.parse(eventIngestedVal).toEpochMilli())); } - public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception { + public void testTimestampAndEventIngestedFieldTypeExposedByAllIndicesServices() throws Exception { internalCluster().startNodes(between(2, 4)); final String locale; @@ -181,11 +200,11 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ensureGreen("index"); if (randomBoolean()) { - prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date).get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date, IndexMetadata.EVENT_INGESTED_FIELD_NAME, date).get(); } for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldType(index)); + assertNull(indicesService.getTimestampFieldTypeInfo(index)); } assertAcked( @@ -193,15 +212,129 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ); ensureGreen("index"); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - final PlainActionFuture timestampFieldTypeFuture = new PlainActionFuture<>(); + final PlainActionFuture> future = new PlainActionFuture<>(); assertBusy(() -> { - final DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(index); + DateFieldRangeInfo timestampsFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(index); + DateFieldMapper.DateFieldType timestampFieldType = timestampsFieldTypeInfo.getTimestampFieldType(); + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampsFieldTypeInfo.getEventIngestedFieldType(); + assertNotNull(eventIngestedFieldType); assertNotNull(timestampFieldType); - timestampFieldTypeFuture.onResponse(timestampFieldType); + future.onResponse( + Map.of( + DataStream.TIMESTAMP_FIELD_NAME, + timestampFieldType, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + eventIngestedFieldType + ) + ); + }); + assertTrue(future.isDone()); + assertThat(future.get().get(DataStream.TIMESTAMP_FIELD_NAME).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat(future.get().get(DataStream.TIMESTAMP_FIELD_NAME).dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); + 
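            // DateFieldRangeInfo (introduced by this patch) appears to be a simple holder pairing
            // the two date field types with their index ranges. Judging from the constructor calls
            // and getters used elsewhere in the patch, its shape is roughly:
            //
            //   DateFieldRangeInfo(
            //       DateFieldMapper.DateFieldType timestampFieldType,       // @timestamp
            //       IndexLongFieldRange timestampRange,
            //       DateFieldMapper.DateFieldType eventIngestedFieldType,   // event.ingested
            //       IndexLongFieldRange eventIngestedRange)
            //
            // with getTimestampFieldType() / getEventIngestedFieldType() accessors, which is why
            // IndicesService.getTimestampFieldTypeInfo(index) can expose both fields at once.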
assertThat(future.get().get(IndexMetadata.EVENT_INGESTED_FIELD_NAME).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat( + future.get().get(IndexMetadata.EVENT_INGESTED_FIELD_NAME).dateTimeFormatter().parseMillis(date), + equalTo(1580817683000L) + ); + } + + assertAcked( + client().execute( + FreezeIndexAction.INSTANCE, + new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index").setFreeze(false) + ).actionGet() + ); + ensureGreen("index"); + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + assertNull(indicesService.getTimestampFieldTypeInfo(index)); + } + } + + public void testTimestampOrEventIngestedFieldTypeExposedByAllIndicesServices() throws Exception { + internalCluster().startNodes(between(2, 4)); + + final String locale; + final String date; + + switch (between(1, 3)) { + case 1 -> { + locale = ""; + date = "04 Feb 2020 12:01:23Z"; + } + case 2 -> { + locale = "en_GB"; + date = "04 Feb 2020 12:01:23Z"; + } + case 3 -> { + locale = "fr_FR"; + date = "04 févr. 2020 12:01:23Z"; + } + default -> throw new AssertionError("impossible"); + } + + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + assertAcked( + prepareCreate("index").setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + .setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject(timeField) + .field("type", "date") + .field("format", "dd LLL yyyy HH:mm:ssX") + .field("locale", locale) + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); + + final Index index = clusterAdmin().prepareState() + .clear() + .setIndices("index") + .setMetadata(true) + .get() + .getState() + .metadata() + .index("index") + .getIndex(); + + ensureGreen("index"); + if (randomBoolean()) { + prepareIndex("index").setSource(timeField, date).get(); + } + + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + assertNull(indicesService.getTimestampFieldTypeInfo(index)); + } + + assertAcked( + client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() + ); + ensureGreen("index"); + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + // final PlainActionFuture timestampFieldTypeFuture = new PlainActionFuture<>(); + final PlainActionFuture> future = new PlainActionFuture<>(); + assertBusy(() -> { + DateFieldRangeInfo timestampsFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(index); + DateFieldMapper.DateFieldType timestampFieldType = timestampsFieldTypeInfo.getTimestampFieldType(); + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampsFieldTypeInfo.getEventIngestedFieldType(); + if (timeField == DataStream.TIMESTAMP_FIELD_NAME) { + assertNotNull(timestampFieldType); + assertNull(eventIngestedFieldType); + future.onResponse(Map.of(timeField, timestampFieldType)); + } else { + assertNull(timestampFieldType); + assertNotNull(eventIngestedFieldType); + future.onResponse(Map.of(timeField, eventIngestedFieldType)); + } }); - assertTrue(timestampFieldTypeFuture.isDone()); - assertThat(timestampFieldTypeFuture.get().dateTimeFormatter().locale().toString(), equalTo(locale)); - assertThat(timestampFieldTypeFuture.get().dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); + 
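            // Only the field that was actually mapped should be exposed: the other entry in the
            // DateFieldRangeInfo stays null, which the branch above asserts before completing the
            // future with a single-entry map.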
assertTrue(future.isDone()); + assertThat(future.get().get(timeField).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat(future.get().get(timeField).dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); } assertAcked( @@ -212,7 +345,7 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ); ensureGreen("index"); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldType(index)); + assertNull(indicesService.getTimestampFieldTypeInfo(index)); } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index 5204bdfcc78e6..6dfe1c5835285 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; @@ -100,11 +101,11 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); + createIndexWithTimestampAndEventIngested(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder() @@ -117,11 +118,10 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying // Either add data outside of the range, or documents that don't have timestamp data final boolean indexDataWithTimestamp = randomBoolean(); // Add enough documents to have non-metadata segment files in all shards, - // otherwise the mount operation might go through as the read won't be - // blocked + // otherwise the mount operation might go through as the read won't be blocked final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000); if (indexDataWithTimestamp) { - indexDocumentsWithTimestampWithinDate( + indexDocumentsWithTimestampAndEventIngestedDates( indexOutsideSearchRange, numberOfDocsInIndexOutsideSearchRange, TIMESTAMP_TEMPLATE_OUTSIDE_RANGE @@ -132,7 +132,7 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying // Index enough documents to ensure that all shards have at least some documents int numDocsWithinRange = between(100, 1000); - 
indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexWithinSearchRange, numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -166,9 +166,10 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, nullValue()); final boolean includeIndexCoveringSearchRangeInSearchRequest = randomBoolean(); List indicesToSearch = new ArrayList<>(); @@ -176,7 +177,9 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying indicesToSearch.add(indexWithinSearchRange); } indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timeField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -250,20 +253,44 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying ensureGreen(searchableSnapshotIndexOutsideSearchRange); final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + + // check that @timestamp and 'event.ingested' are now in cluster state final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); + final IndexLongFieldRange updatedEventIngestedRange = updatedIndexMetadata.getEventIngestedRange(); + assertThat(updatedEventIngestedRange.isComplete(), equalTo(true)); + + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + final DateFieldMapper.DateFieldType timestampDataFieldType = timestampFieldTypeInfo.getTimestampFieldType(); + assertThat(timestampDataFieldType, notNullValue()); + final DateFieldMapper.DateFieldType eventIngestedDataFieldType = timestampFieldTypeInfo.getEventIngestedFieldType(); + assertThat(eventIngestedDataFieldType, notNullValue()); + + final DateFieldMapper.Resolution timestampResolution = timestampDataFieldType.resolution(); + final DateFieldMapper.Resolution eventIngestedResolution = eventIngestedDataFieldType.resolution(); if (indexDataWithTimestamp) { assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); assertThat( updatedTimestampMillisRange.getMin(), - 
greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + greaterThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedTimestampMillisRange.getMax(), + lessThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); + + assertThat(updatedEventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) ); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); } else { assertThat(updatedTimestampMillisRange, sameInstance(IndexLongFieldRange.EMPTY)); + assertThat(updatedEventIngestedRange, sameInstance(IndexLongFieldRange.EMPTY)); } // Stop the node holding the searchable snapshots, and since we defined @@ -383,6 +410,171 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } } + /** + * Test shard skipping when only 'event.ingested' is in the index and cluster state. + */ + public void testEventIngestedRangeInSearchAgainstSearchableSnapshotShards() throws Exception { + internalCluster().startMasterOnlyNode(); + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + final String dataNodeHoldingRegularIndex = internalCluster().startDataOnlyNode(); + final String dataNodeHoldingSearchableSnapshot = internalCluster().startDataOnlyNode(); + final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNodeHoldingSearchableSnapshot); + + final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); + + final String timestampField = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + + createIndexWithOnlyOneTimestampField(timestampField, indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); + + final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); + createIndexWithOnlyOneTimestampField( + timestampField, + indexWithinSearchRange, + indexWithinSearchRangeShardCount, + Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex) + .build() + ); + + final int totalShards = indexOutsideSearchRangeShardCount + indexWithinSearchRangeShardCount; + + // Add enough documents to have non-metadata segment files in all shards, + // otherwise the mount operation might go through as the read won't be blocked + final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000); + + indexDocumentsWithOnlyOneTimestampField( + timestampField, + indexOutsideSearchRange, + numberOfDocsInIndexOutsideSearchRange, + TIMESTAMP_TEMPLATE_OUTSIDE_RANGE + ); + + // Index enough documents to ensure that all shards have at least some documents + int numDocsWithinRange = between(100, 1000); + indexDocumentsWithOnlyOneTimestampField( + timestampField, + indexWithinSearchRange, + numDocsWithinRange, + TIMESTAMP_TEMPLATE_WITHIN_RANGE + ); + + final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createRepository(repositoryName, "mock"); + + final SnapshotId snapshotId = 
createSnapshot(repositoryName, "snapshot-1", List.of(indexOutsideSearchRange)).snapshotId(); + assertAcked(indicesAdmin().prepareDelete(indexOutsideSearchRange)); + + final String searchableSnapshotIndexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + + // Block the repository for the node holding the searchable snapshot shards + // to delay its restore + blockDataNode(repositoryName, dataNodeHoldingSearchableSnapshot); + + // Force the searchable snapshot to be allocated in a particular node + Settings restoredIndexSettings = Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingSearchableSnapshot) + .build(); + + final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest( + TEST_REQUEST_TIMEOUT, + searchableSnapshotIndexOutsideSearchRange, + repositoryName, + snapshotId.getName(), + indexOutsideSearchRange, + restoredIndexSettings, + Strings.EMPTY_ARRAY, + false, + randomFrom(MountSearchableSnapshotRequest.Storage.values()) + ); + client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet(); + + final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + + // Allow the searchable snapshots to be finally mounted + unblockNode(repositoryName, dataNodeHoldingSearchableSnapshot); + waitUntilRecoveryIsDone(searchableSnapshotIndexOutsideSearchRange); + ensureGreen(searchableSnapshotIndexOutsideSearchRange); + + IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); + IndexLongFieldRange updatedEventIngestedMillisRange = updatedIndexMetadata.getEventIngestedRange(); + + // @timestamp range should be null since it was not included in the index or indexed docs + assertThat(updatedTimestampMillisRange, equalTo(IndexLongFieldRange.UNKNOWN)); + assertThat(updatedEventIngestedMillisRange, not(equalTo(IndexLongFieldRange.UNKNOWN))); + + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + + DateFieldMapper.DateFieldType timestampDataFieldType = timestampFieldTypeInfo.getTimestampFieldType(); + assertThat(timestampDataFieldType, nullValue()); + + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampFieldTypeInfo.getEventIngestedFieldType(); + assertThat(eventIngestedFieldType, notNullValue()); + + DateFieldMapper.Resolution eventIngestedResolution = eventIngestedFieldType.resolution(); + assertThat(updatedEventIngestedMillisRange.isComplete(), equalTo(true)); + assertThat( + updatedEventIngestedMillisRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedMillisRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); + + // now do a search against event.ingested + List indicesToSearch = new ArrayList<>(); + indicesToSearch.add(indexWithinSearchRange); + indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange); + + { + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) + .from("2020-11-28T00:00:00.000000000Z", true) + 
.to("2020-11-29T00:00:00.000000000Z"); + + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(rangeQuery)); + + assertResponse(client().search(request), searchResponse -> { + // All the regular index searches succeeded + assertThat(searchResponse.getSuccessfulShards(), equalTo(totalShards)); + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // All the searchable snapshots shards were skipped + assertThat(searchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + }); + + SearchShardAPIResult searchShardResult = doSearchShardAPIQuery(indicesToSearch, rangeQuery, true, totalShards); + assertThat(searchShardResult.skipped().size(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(searchShardResult.notSkipped().size(), equalTo(indexWithinSearchRangeShardCount)); + } + + // query a range that covers both indexes - all shards should be searched, none skipped + { + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) + .from("2019-11-28T00:00:00.000000000Z", true) + .to("2021-11-29T00:00:00.000000000Z"); + + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(rangeQuery)); + + assertResponse(client().search(request), searchResponse -> { + assertThat(searchResponse.getSuccessfulShards(), equalTo(totalShards)); + assertThat(searchResponse.getFailedShards(), equalTo(0)); + assertThat(searchResponse.getSkippedShards(), equalTo(0)); + assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + }); + + SearchShardAPIResult searchShardResult = doSearchShardAPIQuery(indicesToSearch, rangeQuery, true, totalShards); + assertThat(searchShardResult.skipped().size(), equalTo(0)); + assertThat(searchShardResult.notSkipped().size(), equalTo(totalShards)); + } + } + /** * Can match against searchable snapshots is tested via both the Search API and the SearchShards (transport-only) API. * The latter is a way to do only a can-match rather than all search phases. 
@@ -396,7 +588,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.builder() @@ -404,7 +596,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() .build() ); - indexDocumentsWithTimestampWithinDate(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -438,11 +630,14 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, nullValue()); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + final String timestampField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -500,14 +695,29 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() ensureGreen(searchableSnapshotIndexOutsideSearchRange); final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); - final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); - assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); - assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, notNullValue()); + + final IndexLongFieldRange updatedTimestampRange = updatedIndexMetadata.getTimestampRange(); + DateFieldMapper.Resolution tsResolution = timestampFieldTypeInfo.getTimestampFieldType().resolution(); + ; + assertThat(updatedTimestampRange.isComplete(), equalTo(true)); + assertThat(updatedTimestampRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + 
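            // Bounds are compared through the field's resolution (milliseconds vs. nanoseconds)
            // because the test mappings randomly use "date" or "date_nanos"; converting the expected
            // Instants the same way keeps these assertions valid for either resolution.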
assertThat(updatedTimestampRange.getMin(), greaterThanOrEqualTo(tsResolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); + assertThat(updatedTimestampRange.getMax(), lessThanOrEqualTo(tsResolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); + + final IndexLongFieldRange updatedEventIngestedRange = updatedIndexMetadata.getEventIngestedRange(); + DateFieldMapper.Resolution eventIngestedResolution = timestampFieldTypeInfo.getEventIngestedFieldType().resolution(); + assertThat(updatedEventIngestedRange.isComplete(), equalTo(true)); + assertThat(updatedEventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); // Stop the node holding the searchable snapshots, and since we defined // the index allocation criteria to require the searchable snapshot @@ -579,7 +789,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder() @@ -587,7 +797,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo .build() ); - indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -621,11 +831,13 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, nullValue()); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timeField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -680,13 +892,32 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange); final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = 
dateFieldType.resolution(); + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, notNullValue()); + final DateFieldMapper.Resolution timestampResolution = timestampFieldTypeInfo.getTimestampFieldType().resolution(); assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-28T00:00:00Z")))); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-29T00:00:00Z")))); + assertThat( + updatedTimestampMillisRange.getMin(), + greaterThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-28T00:00:00Z"))) + ); + assertThat( + updatedTimestampMillisRange.getMax(), + lessThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-29T00:00:00Z"))) + ); + + final IndexLongFieldRange updatedEventIngestedMillisRange = updatedIndexMetadata.getEventIngestedRange(); + final DateFieldMapper.Resolution eventIngestedResolution = timestampFieldTypeInfo.getEventIngestedFieldType().resolution(); + assertThat(updatedEventIngestedMillisRange.isComplete(), equalTo(true)); + assertThat(updatedEventIngestedMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedMillisRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-28T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedMillisRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-29T00:00:00Z"))) + ); // Stop the node holding the searchable snapshots, and since we defined // the index allocation criteria to require the searchable snapshot @@ -724,17 +955,24 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo } } - private void createIndexWithTimestamp(String indexName, int numShards, Settings extraSettings) throws IOException { + private void createIndexWithTimestampAndEventIngested(String indexName, int numShards, Settings extraSettings) throws IOException { assertAcked( indicesAdmin().prepareCreate(indexName) .setMapping( XContentFactory.jsonBuilder() .startObject() .startObject("properties") + .startObject(DataStream.TIMESTAMP_FIELD_NAME) .field("type", randomFrom("date", "date_nanos")) .field("format", "strict_date_optional_time_nanos") .endObject() + + .startObject(IndexMetadata.EVENT_INGESTED_FIELD_NAME) + .field("type", randomFrom("date", "date_nanos")) + .field("format", "strict_date_optional_time_nanos") + .endObject() + .endObject() .endObject() ) @@ -743,12 +981,70 @@ private void createIndexWithTimestamp(String indexName, int numShards, Settings ensureGreen(indexName); } - private void indexDocumentsWithTimestampWithinDate(String indexName, int docCount, String timestampTemplate) throws Exception { + private void createIndexWithOnlyOneTimestampField(String timestampField, String index, int numShards, Settings extraSettings) + throws IOException { + assertAcked( + indicesAdmin().prepareCreate(index) + .setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + + .startObject(timestampField) + .field("type", randomFrom("date", "date_nanos")) + .field("format", "strict_date_optional_time_nanos") + .endObject() + + .endObject() + .endObject() + ) + 
.setSettings(indexSettingsNoReplicas(numShards).put(INDEX_SOFT_DELETES_SETTING.getKey(), true).put(extraSettings)) + ); + ensureGreen(index); + } + + private void indexDocumentsWithOnlyOneTimestampField(String timestampField, String index, int docCount, String timestampTemplate) + throws Exception { + final List indexRequestBuilders = new ArrayList<>(); + for (int i = 0; i < docCount; i++) { + indexRequestBuilders.add( + prepareIndex(index).setSource( + timestampField, + String.format( + Locale.ROOT, + timestampTemplate, + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) + ) + ) + ); + } + indexRandom(true, false, indexRequestBuilders); + + assertThat(indicesAdmin().prepareForceMerge(index).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(), equalTo(0)); + refresh(index); + forceMerge(); + } + + private void indexDocumentsWithTimestampAndEventIngestedDates(String indexName, int docCount, String timestampTemplate) + throws Exception { + final List indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < docCount; i++) { indexRequestBuilders.add( prepareIndex(indexName).setSource( DataStream.TIMESTAMP_FIELD_NAME, + String.format( + Locale.ROOT, + timestampTemplate, + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) + ), + IndexMetadata.EVENT_INGESTED_FIELD_NAME, String.format( Locale.ROOT, timestampTemplate, @@ -789,4 +1085,39 @@ private void waitUntilRecoveryIsDone(String index) throws Exception { private void waitUntilAllShardsAreUnassigned(Index index) throws Exception { awaitClusterState(state -> state.getRoutingTable().index(index).allPrimaryShardsUnassigned()); } + + record SearchShardAPIResult(List skipped, List notSkipped) {} + + private static SearchShardAPIResult doSearchShardAPIQuery( + List indicesToSearch, + RangeQueryBuilder rangeQuery, + boolean allowPartialSearchResults, + int expectedTotalShards + ) { + SearchShardsRequest searchShardsRequest = new SearchShardsRequest( + indicesToSearch.toArray(new String[0]), + SearchRequest.DEFAULT_INDICES_OPTIONS, + rangeQuery, + null, + null, + allowPartialSearchResults, + null + ); + + SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); + assertThat(searchShardsResponse.getGroups().size(), equalTo(expectedTotalShards)); + List> partitionedBySkipped = searchShardsResponse.getGroups() + .stream() + .collect( + Collectors.teeing( + Collectors.filtering(g -> g.skipped(), Collectors.toList()), + Collectors.filtering(g -> g.skipped() == false, Collectors.toList()), + List::of + ) + ); + + List skipped = partitionedBySkipped.get(0); + List notSkipped = partitionedBySkipped.get(1); + return new SearchShardAPIResult(skipped, notSkipped); + } } From 1955f169ca13e431532c3a4d79fe55239110b81f Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 12 Jul 2024 10:59:33 +0200 Subject: [PATCH 051/406] [Gradle] Replace deprecated build scan api usage (#110783) (#110812) We updated to use develocity plugin a while ago but still used a deprecated api in the build complete logic we have running on ci --- .../gradle/internal/ElasticsearchBuildCompletePlugin.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java index 4f9498c8f33a6..b513fd7b93631 
100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -8,7 +8,7 @@ package org.elasticsearch.gradle.internal; -import com.gradle.scan.plugin.BuildScanExtension; +import com.gradle.develocity.agent.gradle.DevelocityConfiguration; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; @@ -64,7 +64,7 @@ public void apply(Project target) { File targetFile = target.file("build/" + buildNumber + ".tar.bz2"); File projectDir = target.getProjectDir(); File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/"); - BuildScanExtension extension = target.getExtensions().getByType(BuildScanExtension.class); + DevelocityConfiguration extension = target.getExtensions().getByType(DevelocityConfiguration.class); File daemonsLogDir = new File(target.getGradle().getGradleUserHomeDir(), "daemon/" + target.getGradle().getGradleVersion()); getFlowScope().always(BuildFinishedFlowAction.class, spec -> { @@ -125,7 +125,7 @@ interface Parameters extends FlowParameters { ListProperty getFilteredFiles(); @Input - Property getBuildScan(); + Property getBuildScan(); } @@ -198,7 +198,7 @@ public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNo + System.getenv("BUILDKITE_JOB_ID") + "/artifacts/" + artifactUuid; - parameters.getBuildScan().get().link("Artifact Upload", targetLink); + parameters.getBuildScan().get().getBuildScan().link("Artifact Upload", targetLink); } } catch (Exception e) { System.out.println("Failed to upload buildkite artifact " + e.getMessage()); From 9a3e52dd37404a45f766c488fdce0315163b96cf Mon Sep 17 00:00:00 2001 From: "Mark J. 
Hoy" Date: Fri, 12 Jul 2024 10:36:45 -0400 Subject: [PATCH 052/406] [Inference API] Add Docs for Amazon Bedrock Support for the Inference API (#110594) (#110832) * Add Amazon Bedrock Inference API to docs * fix example errors * update semantic search tutorial; add changelog * fix typo * fix error; accept suggestions --- docs/changelog/110248.yaml | 5 + .../inference/inference-apis.asciidoc | 1 + .../inference/put-inference.asciidoc | 1 + .../inference/service-amazon-bedrock.asciidoc | 175 ++++++++++++++++++ .../semantic-search-inference.asciidoc | 1 + .../infer-api-ingest-pipeline-widget.asciidoc | 17 ++ .../infer-api-ingest-pipeline.asciidoc | 26 +++ .../infer-api-mapping-widget.asciidoc | 17 ++ .../inference-api/infer-api-mapping.asciidoc | 35 ++++ .../infer-api-reindex-widget.asciidoc | 17 ++ .../inference-api/infer-api-reindex.asciidoc | 23 +++ .../infer-api-requirements-widget.asciidoc | 17 ++ .../infer-api-requirements.asciidoc | 6 + .../infer-api-search-widget.asciidoc | 17 ++ .../inference-api/infer-api-search.asciidoc | 65 +++++++ .../infer-api-task-widget.asciidoc | 17 ++ .../inference-api/infer-api-task.asciidoc | 26 +++ 17 files changed, 466 insertions(+) create mode 100644 docs/changelog/110248.yaml create mode 100644 docs/reference/inference/service-amazon-bedrock.asciidoc diff --git a/docs/changelog/110248.yaml b/docs/changelog/110248.yaml new file mode 100644 index 0000000000000..85739528b69c6 --- /dev/null +++ b/docs/changelog/110248.yaml @@ -0,0 +1,5 @@ +pr: 110248 +summary: "[Inference API] Add Amazon Bedrock Support to Inference API" +area: Machine Learning +type: enhancement +issues: [ ] diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 02a57504da1cf..9c75820a8f92b 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -25,6 +25,7 @@ include::delete-inference.asciidoc[] include::get-inference.asciidoc[] include::post-inference.asciidoc[] include::put-inference.asciidoc[] +include::service-amazon-bedrock.asciidoc[] include::service-azure-ai-studio.asciidoc[] include::service-azure-openai.asciidoc[] include::service-cohere.asciidoc[] diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 656feb54ffe42..bf92d830d9b69 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -34,6 +34,7 @@ The create {infer} API enables you to create an {infer} endpoint and configure a The following services are available through the {infer} API, click the links to review the configuration details of the services: +* <> * <> * <> * <> diff --git a/docs/reference/inference/service-amazon-bedrock.asciidoc b/docs/reference/inference/service-amazon-bedrock.asciidoc new file mode 100644 index 0000000000000..4ffa368613a0e --- /dev/null +++ b/docs/reference/inference/service-amazon-bedrock.asciidoc @@ -0,0 +1,175 @@ +[[infer-service-amazon-bedrock]] +=== Amazon Bedrock {infer} service + +Creates an {infer} endpoint to perform an {infer} task with the `amazonbedrock` service. 
+ +[discrete] +[[infer-service-amazon-bedrock-api-request]] +==== {api-request-title} + +`PUT /_inference//` + +[discrete] +[[infer-service-amazon-bedrock-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Available task types: + +* `completion`, +* `text_embedding`. +-- + +[discrete] +[[infer-service-amazon-bedrock-api-request-body]] +==== {api-request-body-title} + +`service`:: +(Required, string) The type of service supported for the specified task type. +In this case, +`amazonbedrock`. + +`service_settings`:: +(Required, object) +include::inference-shared.asciidoc[tag=service-settings] ++ +-- +These settings are specific to the `amazonbedrock` service. +-- + +`access_key`::: +(Required, string) +A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests. + +`secret_key`::: +(Required, string) +A valid AWS secret key that is paired with the `access_key`. +To create or manage access and secret keys, see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html[Managing access keys for IAM users] in the AWS documentation. + +IMPORTANT: You need to provide the access and secret keys only once, during the {infer} model creation. +The <> does not retrieve your access or secret keys. +After creating the {infer} model, you cannot change the associated key pairs. +If you want to use a different access and secret key pair, delete the {infer} model and recreate it with the same name and the updated keys. + +`provider`::: +(Required, string) +The model provider for your deployment. +Note that some providers may support only certain task types. +Supported providers include: + +* `amazontitan` - available for `text_embedding` and `completion` task types +* `anthropic` - available for `completion` task type only +* `ai21labs` - available for `completion` task type only +* `cohere` - available for `text_embedding` and `completion` task types +* `meta` - available for `completion` task type only +* `mistral` - available for `completion` task type only + +`model`::: +(Required, string) +The base model ID or an ARN to a custom model based on a foundational model. +The base model IDs can be found in the https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html[Amazon Bedrock model IDs] documentation. +Note that the model ID must be available for the provider chosen, and your IAM user must have access to the model. + +`region`::: +(Required, string) +The region that your model or ARN is deployed in. +The list of available regions per model can be found in the https://docs.aws.amazon.com/bedrock/latest/userguide/models-regions.html[Model support by AWS region] documentation. + +`rate_limit`::: +(Optional, object) +By default, the `amazonbedrock` service sets the number of requests allowed per minute to `240`. +This helps to minimize the number of rate limit errors returned from Amazon Bedrock. +To modify this, set the `requests_per_minute` setting of this object in your service settings: ++ +-- +include::inference-shared.asciidoc[tag=request-per-minute-example] +-- + +`task_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=task-settings] ++ +.`task_settings` for the `completion` task type +[%collapsible%closed] +===== + +`max_new_tokens`::: +(Optional, integer) +Sets the maximum number for the output tokens to be generated. +Defaults to 64. 
+ +`temperature`::: +(Optional, float) +A number between 0.0 and 1.0 that controls the apparent creativity of the results. At temperature 0.0 the model is most deterministic, at temperature 1.0 most random. +Should not be used if `top_p` or `top_k` is specified. + +`top_p`::: +(Optional, float) +Alternative to `temperature`. A number in the range of 0.0 to 1.0, to eliminate low-probability tokens. Top-p uses nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence. +Should not be used if `temperature` is specified. + +`top_k`::: +(Optional, float) +Only available for `anthropic`, `cohere`, and `mistral` providers. +Alternative to `temperature`. Limits samples to the top-K most likely words, balancing coherence and variability. +Should not be used if `temperature` is specified. + +===== ++ +.`task_settings` for the `text_embedding` task type +[%collapsible%closed] +===== + +There are no `task_settings` available for the `text_embedding` task type. + +===== + +[discrete] +[[inference-example-amazonbedrock]] +==== Amazon Bedrock service example + +The following example shows how to create an {infer} endpoint called `amazon_bedrock_embeddings` to perform a `text_embedding` task type. + +Choose chat completion and embeddings models that you have access to from the https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html[Amazon Bedrock base models]. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/amazon_bedrock_embeddings +{ + "service": "amazonbedrock", + "service_settings": { + "access_key": "", + "secret_key": "", + "region": "us-east-1", + "provider": "amazontitan", + "model": "amazon.titan-embed-text-v2:0" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +The next example shows how to create an {infer} endpoint called `amazon_bedrock_completion` to perform a `completion` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/completion/amazon_bedrock_completion +{ + "service": "amazonbedrock", + "service_settings": { + "access_key": "", + "secret_key": "", + "region": "us-east-1", + "provider": "amazontitan", + "model": "amazon.titan-text-premier-v1:0" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] diff --git a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc index 6ecfea0a02dbc..ae27b46d4b876 100644 --- a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc @@ -17,6 +17,7 @@ For a list of supported models available on HuggingFace, refer to Azure based examples use models available through https://ai.azure.com/explore/models?selectedTask=embeddings[Azure AI Studio] or https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models[Azure OpenAI]. Mistral examples use the `mistral-embed` model from https://docs.mistral.ai/getting-started/models/[the Mistral API]. +Amazon Bedrock examples use the `amazon.titan-embed-text-v1` model from https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html[the Amazon Bedrock base models]. Click the name of the service you want to use on any of the widgets below to review the corresponding instructions. 
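As a rough sketch only: the `task_settings` documented above can be supplied alongside the completion endpoint example that this patch adds. The endpoint name, region, provider, and model below are taken from the patch's own example; the empty credential strings are placeholders for AWS keys, and the `max_new_tokens` and `temperature` values are illustrative assumptions rather than recommended defaults.

[source,console]
------------------------------------------------------------
PUT _inference/completion/amazon_bedrock_completion
{
  "service": "amazonbedrock",
  "service_settings": {
    "access_key": "",
    "secret_key": "",
    "region": "us-east-1",
    "provider": "amazontitan",
    "model": "amazon.titan-text-premier-v1:0"
  },
  "task_settings": {
    "max_new_tokens": 128,
    "temperature": 0.2
  }
}
------------------------------------------------------------

Only `temperature` is set here because the documentation above notes it should not be combined with `top_p` or `top_k`.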
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc index c8a42c4d0585a..6039d1de5345b 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc @@ -37,6 +37,12 @@ id="infer-api-ingest-mistral"> Mistral +
+ Amazon Bedrock
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc index a239c79e5a6d1..f95c4a6dbc8c8 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc @@ -164,3 +164,29 @@ PUT _ingest/pipeline/mistral_embeddings and the `output_field` that will contain the {infer} results. // end::mistral[] + +// tag::amazon-bedrock[] + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/amazon_bedrock_embeddings +{ + "processors": [ + { + "inference": { + "model_id": "amazon_bedrock_embeddings", <1> + "input_output": { <2> + "input_field": "content", + "output_field": "content_embedding" + } + } + } + ] +} +-------------------------------------------------- +<1> The name of the inference endpoint you created by using the +<>, it's referred to as `inference_id` in that step. +<2> Configuration object that defines the `input_field` for the {infer} process +and the `output_field` that will contain the {infer} results. + +// end::amazon-bedrock[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc index 80c7c7ef23ee3..66b0cde549545 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc @@ -37,6 +37,12 @@ id="infer-api-mapping-mistral"> Mistral +
+ Amazon Bedrock
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc index a1bce38a02ad2..72c648e63871d 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc @@ -207,3 +207,38 @@ the {infer} pipeline configuration in the next step. <6> The field type which is text in this example. // end::mistral[] + +// tag::amazon-bedrock[] + +[source,console] +-------------------------------------------------- +PUT amazon-bedrock-embeddings +{ + "mappings": { + "properties": { + "content_embedding": { <1> + "type": "dense_vector", <2> + "dims": 1024, <3> + "element_type": "float", + "similarity": "dot_product" <4> + }, + "content": { <5> + "type": "text" <6> + } + } + } +} +-------------------------------------------------- +<1> The name of the field to contain the generated tokens. It must be referenced +in the {infer} pipeline configuration in the next step. +<2> The field to contain the tokens is a `dense_vector` field. +<3> The output dimensions of the model. This value may be different depending on the underlying model used. +See the https://docs.aws.amazon.com/bedrock/latest/userguide/titan-multiemb-models.html[Amazon Titan model] or the https://docs.cohere.com/reference/embed[Cohere Embeddings model] documentation. +<4> For Amazon Bedrock embeddings, the `dot_product` function should be used to +calculate similarity for Amazon titan models, or `cosine` for Cohere models. +<5> The name of the field from which to create the dense vector representation. +In this example, the name of the field is `content`. It must be referenced in +the {infer} pipeline configuration in the next step. +<6> The field type which is text in this example. + +// end::amazon-bedrock[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc index 4face6a105819..9a8028e2b3c6c 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc @@ -37,6 +37,12 @@ id="infer-api-reindex-mistral"> Mistral +
+ Amazon Bedrock
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc index 927e47ea4d67c..995189f1309aa 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc @@ -154,3 +154,26 @@ number makes the update of the reindexing process quicker which enables you to follow the progress closely and detect errors early. // end::mistral[] + +// tag::amazon-bedrock[] + +[source,console] +---- +POST _reindex?wait_for_completion=false +{ + "source": { + "index": "test-data", + "size": 50 <1> + }, + "dest": { + "index": "amazon-bedrock-embeddings", + "pipeline": "amazon_bedrock_embeddings" + } +} +---- +// TEST[skip:TBD] +<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller +number makes the update of the reindexing process quicker which enables you to +follow the progress closely and detect errors early. + +// end::amazon-bedrock[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc index 9981eb90d4929..cf2e4994279d9 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc @@ -37,6 +37,12 @@ id="infer-api-requirements-mistral"> Mistral +
+ Amazon Bedrock
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc index 435e53bbc0bc0..856e4d5f0fe47 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc @@ -39,3 +39,9 @@ You can apply for access to Azure OpenAI by completing the form at https://aka.m * An API key generated for your account // end::mistral[] + +// tag::amazon-bedrock[] +* An AWS Account with https://aws.amazon.com/bedrock/[Amazon Bedrock] access +* A pair of access and secret keys used to access Amazon Bedrock + +// end::amazon-bedrock[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc index 6a67b28f91601..52cf65c4a1509 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc @@ -37,6 +37,12 @@ id="infer-api-search-mistral"> Mistral +
+ Amazon Bedrock
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc index 523c2301e75ff..5e23afeb19a9f 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc @@ -405,3 +405,68 @@ query from the `mistral-embeddings` index sorted by their proximity to the query // NOTCONSOLE // end::mistral[] + +// tag::amazon-bedrock[] + +[source,console] +-------------------------------------------------- +GET amazon-bedrock-embeddings/_search +{ + "knn": { + "field": "content_embedding", + "query_vector_builder": { + "text_embedding": { + "model_id": "amazon_bedrock_embeddings", + "model_text": "Calculate fuel cost" + } + }, + "k": 10, + "num_candidates": 100 + }, + "_source": [ + "id", + "content" + ] +} +-------------------------------------------------- +// TEST[skip:TBD] + +As a result, you receive the top 10 documents that are closest in meaning to the +query from the `amazon-bedrock-embeddings` index sorted by their proximity to the query: + +[source,consol-result] +-------------------------------------------------- +"hits": [ + { + "_index": "amazon-bedrock-embeddings", + "_id": "DDd5OowBHxQKHyc3TDSC", + "_score": 0.83704096, + "_source": { + "id": 862114, + "body": "How to calculate fuel cost for a road trip. By Tara Baukus Mello • Bankrate.com. Dear Driving for Dollars, My family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost.It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes.y family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost. It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes." + } + }, + { + "_index": "amazon-bedrock-embeddings", + "_id": "ajd5OowBHxQKHyc3TDSC", + "_score": 0.8345704, + "_source": { + "id": 820622, + "body": "Home Heating Calculator. Typically, approximately 50% of the energy consumed in a home annually is for space heating. When deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important.This calculator can help you estimate the cost of fuel for different heating appliances.hen deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important. This calculator can help you estimate the cost of fuel for different heating appliances." + } + }, + { + "_index": "amazon-bedrock-embeddings", + "_id": "Djd5OowBHxQKHyc3TDSC", + "_score": 0.8327426, + "_source": { + "id": 8202683, + "body": "Fuel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel.If you are paying $4 per gallon, the trip would cost you $200.Most boats have much larger gas tanks than cars.uel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel." + } + }, + (...) 
+ ] +-------------------------------------------------- +// NOTCONSOLE + +// end::amazon-bedrock[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc index 1f3ad645d7c29..d13301b64a871 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc @@ -37,6 +37,12 @@ id="infer-api-task-mistral"> Mistral +
+ Amazon Bedrock
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc index 18fa3ba541bff..c6ef2a46a8731 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc @@ -177,3 +177,29 @@ PUT _inference/text_embedding/mistral_embeddings <1> <3> The Mistral embeddings model name, for example `mistral-embed`. // end::mistral[] + +// tag::amazon-bedrock[] + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/amazon_bedrock_embeddings <1> +{ + "service": "amazonbedrock", + "service_settings": { + "access_key": "", <2> + "secret_key": "", <3> + "region": "", <4> + "provider": "", <5> + "model": "" <6> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The task type is `text_embedding` in the path and the `inference_id` which is the unique identifier of the {infer} endpoint is `amazon_bedrock_embeddings`. +<2> The access key can be found on your AWS IAM management page for the user account to access Amazon Bedrock. +<3> The secret key should be the paired key for the specified access key. +<4> Specify the region that your model is hosted in. +<5> Specify the model provider. +<6> The model ID or ARN of the model to use. + +// end::amazon-bedrock[] From 7ee9a24177b8cf8bf23fabda36c61cf6323c0ce5 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 12 Jul 2024 18:23:27 +0200 Subject: [PATCH 053/406] Ignore configs from DistributionDownload plugin and bwc for resolveAllDependencies (#110828) (#110834) --- .../elasticsearch/gradle/DistributionDownloadPlugin.java | 6 ++++-- build.gradle | 8 +++++++- qa/packaging/build.gradle | 5 ----- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java index 2bc4aa1a1be36..fe7d303dc522b 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java @@ -42,8 +42,10 @@ public class DistributionDownloadPlugin implements Plugin { private static final String FAKE_SNAPSHOT_IVY_GROUP = "elasticsearch-distribution-snapshot"; private static final String DOWNLOAD_REPO_NAME = "elasticsearch-downloads"; private static final String SNAPSHOT_REPO_NAME = "elasticsearch-snapshots"; - public static final String DISTRO_EXTRACTED_CONFIG_PREFIX = "es_distro_extracted_"; - public static final String DISTRO_CONFIG_PREFIX = "es_distro_file_"; + + public static final String ES_DISTRO_CONFIG_PREFIX = "es_distro_"; + public static final String DISTRO_EXTRACTED_CONFIG_PREFIX = ES_DISTRO_CONFIG_PREFIX + "extracted_"; + public static final String DISTRO_CONFIG_PREFIX = ES_DISTRO_CONFIG_PREFIX + "file_"; private final ObjectFactory objectFactory; private NamedDomainObjectContainer distributionsContainer; diff --git a/build.gradle b/build.gradle index 3869d21b49bfe..01fdace570ce0 100644 --- a/build.gradle +++ b/build.gradle @@ -19,6 +19,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.util.GradleUtils import org.gradle.plugins.ide.eclipse.model.AccessRule import org.gradle.plugins.ide.eclipse.model.ProjectDependency +import org.elasticsearch.gradle.DistributionDownloadPlugin import 
java.nio.file.Files @@ -284,11 +285,16 @@ allprojects { } tasks.register('resolveAllDependencies', ResolveAllDependencies) { - configs = project.configurations + def ignoredPrefixes = [DistributionDownloadPlugin.ES_DISTRO_CONFIG_PREFIX, "jdbcDriver"] + configs = project.configurations.matching { config -> ignoredPrefixes.any { config.name.startsWith(it) } == false } resolveJavaToolChain = true if (project.path.contains("fixture")) { dependsOn tasks.withType(ComposePull) } + if (project.path.contains(":distribution:docker")) { + enabled = false + } + } plugins.withId('lifecycle-base') { diff --git a/qa/packaging/build.gradle b/qa/packaging/build.gradle index d1890e8c49fcf..758dfe6661766 100644 --- a/qa/packaging/build.gradle +++ b/qa/packaging/build.gradle @@ -36,8 +36,3 @@ tasks.named("test").configure { enabled = false } tasks.register('destructivePackagingTest') { dependsOn 'destructiveDistroTest' } - -tasks.named('resolveAllDependencies') { - // avoid resolving all elasticsearch distros - enabled = false -} From e66bb223236c59a5698b9d861cbfd61c78c502d9 Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Fri, 12 Jul 2024 12:41:06 -0400 Subject: [PATCH 054/406] Remove preview from top level query rules API page (#110838) (#110839) --- docs/reference/query-rules/apis/index.asciidoc | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/reference/query-rules/apis/index.asciidoc b/docs/reference/query-rules/apis/index.asciidoc index f7303647f8515..53d5fc3dc4eee 100644 --- a/docs/reference/query-rules/apis/index.asciidoc +++ b/docs/reference/query-rules/apis/index.asciidoc @@ -1,8 +1,6 @@ [[query-rules-apis]] == Query rules APIs -preview::[] - ++++ Query rules APIs ++++ From 223647fe3f7b205e863f3f90997a35c6cf353973 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Fri, 12 Jul 2024 10:38:40 -0700 Subject: [PATCH 055/406] [8.15] Fix RollupIndexerStateTests (#110804) --- .../xpack/rollup/job/RollupIndexerStateTests.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index 105711c4057a6..ad5e6a0cf9b40 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -41,6 +41,7 @@ import java.util.function.Function; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.startsWith; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -556,22 +557,22 @@ public void testMultipleJobTriggering() throws Exception { assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); // This may take more than one attempt due to a cleanup/transition phase // that happens after state change to STARTED (`isJobFinishing`). 
- assertBusy(() -> indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertBusy(() -> assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()))); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); assertFalse(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED))); - assertThat(indexer.getStats().getNumInvocations(), equalTo((long) i + 1)); assertThat(indexer.getStats().getNumPages(), equalTo((long) i + 1)); } final CountDownLatch latch = indexer.newLatch(); - assertBusy(() -> indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertBusy(() -> assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()))); assertThat(indexer.stop(), equalTo(IndexerState.STOPPING)); assertThat(indexer.getState(), Matchers.either(Matchers.is(IndexerState.STOPPING)).or(Matchers.is(IndexerState.STOPPED))); latch.countDown(); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STOPPED))); assertTrue(indexer.abort()); + assertThat(indexer.getStats().getNumInvocations(), greaterThanOrEqualTo(6L)); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } From 10397deb85cabc7bf5f85d20cfd08329ff97f710 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Mon, 15 Jul 2024 11:41:02 +0100 Subject: [PATCH 056/406] [DOCS] Add note about ML model 502 timeout when using `Create inference API` (#110835) (#110865) * [DOCS] Add note about ml model 502 timeout * Add note to API ref --- docs/reference/inference/put-inference.asciidoc | 10 ++++++++-- .../semantic-search-semantic-text.asciidoc | 8 +++++++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index bf92d830d9b69..b809a96b8f81a 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -11,7 +11,6 @@ IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. - [discrete] [[put-inference-api-request]] ==== {api-request-title} @@ -25,7 +24,6 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo * Requires the `manage_inference` <> (the built-in `inference_admin` role grants this privilege) - [discrete] [[put-inference-api-desc]] ==== {api-description-title} @@ -45,3 +43,11 @@ The following services are available through the {infer} API, click the links to * <> * <> * <> + +[NOTE] +==== +You might see a 502 bad gateway error in the response when using the {kib} Console. +This error usually just reflects a timeout, while the model downloads in the background. +You can check the download progress in the {ml-app} UI. +If using the Python client, you can set the `timeout` parameter to a higher value. 
+==== \ No newline at end of file diff --git a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc index c2dabedb0336c..2b8b6c9c25afe 100644 --- a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc @@ -24,7 +24,6 @@ This tutorial uses the <> for demonstra To use the `semantic_text` field type, you must have an {infer} endpoint deployed in your cluster using the <>. - [discrete] [[semantic-text-infer-endpoint]] ==== Create the {infer} endpoint @@ -48,6 +47,13 @@ be used and ELSER creates sparse vectors. The `inference_id` is `my-elser-endpoint`. <2> The `elser` service is used in this example. +[NOTE] +==== +You might see a 502 bad gateway error in the response when using the {kib} Console. +This error usually just reflects a timeout, while the model downloads in the background. +You can check the download progress in the {ml-app} UI. +If using the Python client, you can set the `timeout` parameter to a higher value. +==== [discrete] [[semantic-text-index-mapping]] From 0fbde2c3fb3c0513bd68d14e16ef869c1fe3a51a Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Mon, 15 Jul 2024 15:55:07 +0300 Subject: [PATCH 057/406] Do not write dangling indices in a test (#110867) (#110869) So that the only expected disk write at the point of the assertion is from the bulk request. And not from the asynchronous runnable of updateDanglingIndicesInfo(). Fixes #110551 --- .../action/bulk/BulkAfterWriteFsyncFailureIT.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java index 6a4e973d8fcc5..d531686bb5207 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java @@ -29,6 +29,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; +import static org.elasticsearch.indices.IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -48,7 +49,11 @@ public static void removeDisruptFSyncFS() { PathUtilsForTesting.teardown(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/110551") + @Override + protected Settings nodeSettings() { + return Settings.builder().put(WRITE_DANGLING_INDICES_INFO_SETTING.getKey(), false).build(); + } + public void testFsyncFailureDoesNotAdvanceLocalCheckpoints() { String indexName = randomIdentifier(); client().admin() From 6c12360958ee325e5d050f1e40e3cf54002e21a5 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 15 Jul 2024 15:38:21 +0200 Subject: [PATCH 058/406] ES|QL: better validation for GROK patterns (#110574) (#110682) Co-authored-by: Elastic Machine --- .../xpack/esql/parser/LogicalPlanBuilder.java | 20 ++++++++++++++++++- .../xpack/esql/plan/logical/Grok.java | 2 +- .../esql/parser/StatementParserTests.java | 18 ++++++++++++++--- 3 files changed, 35 insertions(+), 5 deletions(-) diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index fee51c40a2525..84c849a759ae5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -146,12 +146,30 @@ public PlanFactory visitEvalCommand(EsqlBaseParser.EvalCommandContext ctx) { @Override public PlanFactory visitGrokCommand(EsqlBaseParser.GrokCommandContext ctx) { return p -> { + Source source = source(ctx); String pattern = visitString(ctx.string()).fold().toString(); - Grok result = new Grok(source(ctx), p, expression(ctx.primaryExpression()), Grok.pattern(source(ctx), pattern)); + Grok.Parser grokParser = Grok.pattern(source, pattern); + validateGrokPattern(source, grokParser, pattern); + Grok result = new Grok(source(ctx), p, expression(ctx.primaryExpression()), grokParser); return result; }; } + private void validateGrokPattern(Source source, Grok.Parser grokParser, String pattern) { + Map definedAttributes = new HashMap<>(); + for (Attribute field : grokParser.extractedFields()) { + String name = field.name(); + DataType type = field.dataType(); + DataType prev = definedAttributes.put(name, type); + if (prev != null) { + throw new ParsingException( + source, + "Invalid GROK pattern [" + pattern + "]: the attribute [" + name + "] is defined multiple times with different types" + ); + } + } + } + @Override public PlanFactory visitDissectCommand(EsqlBaseParser.DissectCommandContext ctx) { return p -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java index 0c1c400f3ab4d..e495a2eb76668 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java @@ -32,7 +32,7 @@ public class Grok extends RegexExtract { public record Parser(String pattern, org.elasticsearch.grok.Grok grok) { - private List extractedFields() { + public List extractedFields() { return grok.captureConfig() .stream() .sorted(Comparator.comparing(GrokCaptureConfig::name)) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index fd046d8dd1cff..111c90790caf0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -769,15 +769,27 @@ public void testDissectPattern() { public void testGrokPattern() { LogicalPlan cmd = processingCommand("grok a \"%{WORD:foo}\""); assertEquals(Grok.class, cmd.getClass()); - Grok dissect = (Grok) cmd; - assertEquals("%{WORD:foo}", dissect.parser().pattern()); - assertEquals(List.of(referenceAttribute("foo", KEYWORD)), dissect.extractedFields()); + Grok grok = (Grok) cmd; + assertEquals("%{WORD:foo}", grok.parser().pattern()); + assertEquals(List.of(referenceAttribute("foo", KEYWORD)), grok.extractedFields()); ParsingException pe = expectThrows(ParsingException.class, () -> statement("row a = \"foo bar\" | grok a \"%{_invalid_:x}\"")); assertThat( pe.getMessage(), containsString("Invalid 
pattern [%{_invalid_:x}] for grok: Unable to find pattern [_invalid_] in Grok's pattern dictionary") ); + + cmd = processingCommand("grok a \"%{WORD:foo} %{WORD:foo}\""); + assertEquals(Grok.class, cmd.getClass()); + grok = (Grok) cmd; + assertEquals("%{WORD:foo} %{WORD:foo}", grok.parser().pattern()); + assertEquals(List.of(referenceAttribute("foo", KEYWORD)), grok.extractedFields()); + + expectError( + "row a = \"foo bar\" | GROK a \"%{NUMBER:foo} %{WORD:foo}\"", + "line 1:22: Invalid GROK pattern [%{NUMBER:foo} %{WORD:foo}]:" + + " the attribute [foo] is defined multiple times with different types" + ); } public void testLikeRLike() { From d5085ac93c731a014a23187f53a0b1f604eeae52 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Mon, 15 Jul 2024 10:24:00 -0400 Subject: [PATCH 059/406] [Backport] 110824 to 8.15 (#110843) * [ESQL] Count_distinct(_source) should return a 400 (#110824) Resolves [#105240](https://github.com/elastic/elasticsearch/issues/105240) Count_distinct doesn't work on source, but the type resolution was allowing that through. This resulted in a 500 layer deeper in the aggregations code. This PR fixes the 500 error by correctly failing during type resolution. * Even hand-backporting, I messed up the capabilities file * one more --- docs/changelog/110824.yaml | 5 +++++ .../xpack/esql/action/EsqlCapabilities.java | 8 +++++++- .../function/aggregate/CountDistinct.java | 4 ++-- .../xpack/esql/planner/AggregateMapper.java | 2 +- .../xpack/esql/analysis/AnalyzerTests.java | 2 +- .../rest-api-spec/test/esql/140_metadata.yml | 17 ++++++++++++++++- 6 files changed, 32 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/110824.yaml diff --git a/docs/changelog/110824.yaml b/docs/changelog/110824.yaml new file mode 100644 index 0000000000000..4fe97d6692865 --- /dev/null +++ b/docs/changelog/110824.yaml @@ -0,0 +1,5 @@ +pr: 110824 +summary: "[ESQL] Count_distinct(_source) should return a 400" +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 2d3b7255caedd..8f24cd113a056 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -112,7 +112,13 @@ public enum Cap { * Fix a parsing issue where numbers below Long.MIN_VALUE threw an exception instead of parsing as doubles. * see Parsing large numbers is inconsistent #104323 */ - FIX_PARSING_LARGE_NEGATIVE_NUMBERS; + FIX_PARSING_LARGE_NEGATIVE_NUMBERS, + + /** + * Fix the status code returned when trying to run count_distinct on the _source type (which is not supported). 
+ * see count_distinct(_source) returns a 500 response + */ + FIX_COUNT_DISTINCT_SOURCE_ERROR; private final boolean snapshotOnly; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java index 5e61f69758a47..7686d10a03d9e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java @@ -116,10 +116,10 @@ protected TypeResolution resolveType() { boolean resolved = resolution.resolved(); resolution = isType( field(), - dt -> resolved && dt != DataType.UNSIGNED_LONG, + dt -> resolved && dt != DataType.UNSIGNED_LONG && dt != DataType.SOURCE, sourceText(), DEFAULT, - "any exact type except unsigned_long or counter types" + "any exact type except unsigned_long, _source, or counter types" ); if (resolution.unresolved() || precision == null) { return resolution; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index 91433e42033c5..009e65cda9251 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -264,7 +264,7 @@ private static DataType toDataType(ElementType elementType) { case INT -> DataType.INTEGER; case LONG -> DataType.LONG; case DOUBLE -> DataType.DOUBLE; - default -> throw new EsqlIllegalArgumentException("unsupported agg type: " + elementType); + case FLOAT, NULL, DOC, COMPOSITE, UNKNOWN -> throw new EsqlIllegalArgumentException("unsupported agg type: " + elementType); }; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 1f2ec0c236ecf..82a9699412a34 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1830,7 +1830,7 @@ public void testUnsupportedTypesInStats() { Found 8 problems line 2:12: argument of [avg(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [unsigned_long] - line 2:20: argument of [count_distinct(x)] must be [any exact type except unsigned_long or counter types],\ + line 2:20: argument of [count_distinct(x)] must be [any exact type except unsigned_long, _source, or counter types],\ found value [x] type [unsigned_long] line 2:39: argument of [max(x)] must be [datetime or numeric except unsigned_long or counter types],\ found value [max(x)] type [unsigned_long] diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/140_metadata.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/140_metadata.yml index d6c1c6c97944a..33c9cc7558672 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/140_metadata.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/140_metadata.yml @@ -5,7 +5,7 @@ setup: - method: POST path: /_query parameters: [method, path, parameters, capabilities] - capabilities: [metadata_fields, 
metadata_field_ignored] + capabilities: [metadata_fields, metadata_ignored_field] reason: "Ignored metadata field capability required" - do: @@ -140,3 +140,18 @@ setup: - match: {columns.0.name: "count_distinct(_ignored)"} - match: {columns.0.type: "long"} - match: {values.0.0: 2} + +--- +"Count_distinct on _source is a 400 error": + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [fix_count_distinct_source_error] + reason: "Capability marks fixing this bug" + - do: + catch: bad_request + esql.query: + body: + query: 'FROM test [metadata _source] | STATS COUNT_DISTINCT(_source)' From eb36da88f563c4c349c06546cd704c2c9651e8a9 Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Mon, 15 Jul 2024 14:22:15 -0400 Subject: [PATCH 060/406] Revert "Search coordinator uses event.ingested in cluster state to do rewrites (#110352) (#110782)" (#110883) This reverts commit 9092394b19dea9e0f20290a2571a82b1d3610987. --- docs/changelog/110352.yaml | 5 - .../TimestampFieldMapperServiceTests.java | 4 +- .../query/CoordinatorRewriteContext.java | 113 +---- .../CoordinatorRewriteContextProvider.java | 30 +- .../index/query/RangeQueryBuilder.java | 6 +- .../indices/DateFieldRangeInfo.java | 51 --- .../elasticsearch/indices/IndicesService.java | 19 +- .../indices/TimestampFieldMapperService.java | 56 +-- .../CanMatchPreFilterSearchPhaseTests.java | 340 +++------------ .../test/AbstractBuilderTestCase.java | 11 +- .../index/engine/frozen/FrozenIndexIT.java | 163 +------ ...pshotsCanMatchOnCoordinatorIntegTests.java | 409 ++---------------- 12 files changed, 173 insertions(+), 1034 deletions(-) delete mode 100644 docs/changelog/110352.yaml delete mode 100644 server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java diff --git a/docs/changelog/110352.yaml b/docs/changelog/110352.yaml deleted file mode 100644 index 7dad1ce5f6dd4..0000000000000 --- a/docs/changelog/110352.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110352 -summary: Search coordinator uses `event.ingested` in cluster state to do rewrites -area: Search -type: enhancement -issues: [] diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java index eb35c44d30331..97959fa385241 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java @@ -61,7 +61,7 @@ public void testGetTimestampFieldTypeForTsdbDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); assertThat(result, notNullValue()); } @@ -70,7 +70,7 @@ public void testGetTimestampFieldTypeForDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); assertThat(result, nullValue()); } diff --git 
a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index f2fc7c1bd6cd0..2a1062f8876d2 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -9,14 +9,11 @@ package org.elasticsearch.index.query; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.shard.IndexLongFieldRange; -import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.Collections; @@ -26,24 +23,19 @@ * Context object used to rewrite {@link QueryBuilder} instances into simplified version in the coordinator. * Instances of this object rely on information stored in the {@code IndexMetadata} for certain indices. * Right now this context object is able to rewrite range queries that include a known timestamp field - * (i.e. the timestamp field for DataStreams or the 'event.ingested' field in ECS) into a MatchNoneQueryBuilder - * and skip the shards that don't hold queried data. See IndexMetadata for more details. + * (i.e. the timestamp field for DataStreams) into a MatchNoneQueryBuilder and skip the shards that + * don't hold queried data. See IndexMetadata#getTimestampRange() for more details */ public class CoordinatorRewriteContext extends QueryRewriteContext { - private final DateFieldRangeInfo dateFieldRangeInfo; + private final IndexLongFieldRange indexLongFieldRange; + private final DateFieldMapper.DateFieldType timestampFieldType; - /** - * Context for coordinator search rewrites based on time ranges for the @timestamp field and/or 'event.ingested' field - * @param parserConfig - * @param client - * @param nowInMillis - * @param dateFieldRangeInfo range and field type info for @timestamp and 'event.ingested' - */ public CoordinatorRewriteContext( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, - DateFieldRangeInfo dateFieldRangeInfo + IndexLongFieldRange indexLongFieldRange, + DateFieldMapper.DateFieldType timestampFieldType ) { super( parserConfig, @@ -61,98 +53,29 @@ public CoordinatorRewriteContext( null, null ); - this.dateFieldRangeInfo = dateFieldRangeInfo; + this.indexLongFieldRange = indexLongFieldRange; + this.timestampFieldType = timestampFieldType; } - /** - * Get min timestamp for either '@timestamp' or 'event.ingested' fields. Any other field - * passed in will cause an {@link IllegalArgumentException} to be thrown, as these are the only - * two fields supported for coordinator rewrites (based on time range). - * @param fieldName Must be DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME - * @return min timestamp for the field from IndexMetadata in cluster state. 
- */ - long getMinTimestamp(String fieldName) { - if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { - return dateFieldRangeInfo.getTimestampRange().getMin(); - } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { - return dateFieldRangeInfo.getEventIngestedRange().getMin(); - } else { - throw new IllegalArgumentException( - Strings.format( - "Only [%s] or [%s] fields are supported for min timestamp coordinator rewrites, but got: [%s]", - DataStream.TIMESTAMP_FIELD_NAME, - IndexMetadata.EVENT_INGESTED_FIELD_NAME, - fieldName - ) - ); - } + long getMinTimestamp() { + return indexLongFieldRange.getMin(); } - /** - * Get max timestamp for either '@timestamp' or 'event.ingested' fields. Any other field - * passed in will cause an {@link IllegalArgumentException} to be thrown, as these are the only - * two fields supported for coordinator rewrites (based on time range). - * @param fieldName Must be DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME - * @return max timestamp for the field from IndexMetadata in cluster state. - */ - long getMaxTimestamp(String fieldName) { - if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { - return dateFieldRangeInfo.getTimestampRange().getMax(); - } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { - return dateFieldRangeInfo.getEventIngestedRange().getMax(); - } else { - throw new IllegalArgumentException( - Strings.format( - "Only [%s] or [%s] fields are supported for max timestamp coordinator rewrites, but got: [%s]", - DataStream.TIMESTAMP_FIELD_NAME, - IndexMetadata.EVENT_INGESTED_FIELD_NAME, - fieldName - ) - ); - } + long getMaxTimestamp() { + return indexLongFieldRange.getMax(); } - /** - * Determine whether either '@timestamp' or 'event.ingested' fields has useful timestamp ranges - * stored in cluster state for this context. - * Any other fieldname will cause an {@link IllegalArgumentException} to be thrown, as these are the only - * two fields supported for coordinator rewrites (based on time range). - * @param fieldName Must be DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME - * @return min timestamp for the field from IndexMetadata in cluster state. - */ - boolean hasTimestampData(String fieldName) { - if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { - return dateFieldRangeInfo.getTimestampRange().isComplete() - && dateFieldRangeInfo.getTimestampRange() != IndexLongFieldRange.EMPTY; - } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { - return dateFieldRangeInfo.getEventIngestedRange().isComplete() - && dateFieldRangeInfo.getEventIngestedRange() != IndexLongFieldRange.EMPTY; - } else { - throw new IllegalArgumentException( - Strings.format( - "Only [%s] or [%s] fields are supported for min/max timestamp coordinator rewrites, but got: [%s]", - DataStream.TIMESTAMP_FIELD_NAME, - IndexMetadata.EVENT_INGESTED_FIELD_NAME, - fieldName - ) - ); - } + boolean hasTimestampData() { + return indexLongFieldRange.isComplete() && indexLongFieldRange != IndexLongFieldRange.EMPTY; } - /** - * @param fieldName Get MappedFieldType for either '@timestamp' or 'event.ingested' fields. - * @return min timestamp for the field from IndexMetadata in cluster state or null if fieldName was not - * DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME. 
- */ @Nullable public MappedFieldType getFieldType(String fieldName) { - if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { - return dateFieldRangeInfo.getTimestampFieldType(); - } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { - return dateFieldRangeInfo.getEventIngestedFieldType(); - } else { + if (fieldName.equals(timestampFieldType.name()) == false) { return null; } + + return timestampFieldType; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java index 8251b82c05af2..e44861b4afe8a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java @@ -14,7 +14,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.shard.IndexLongFieldRange; -import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.function.Function; @@ -26,14 +25,14 @@ public class CoordinatorRewriteContextProvider { private final Client client; private final LongSupplier nowInMillis; private final Supplier clusterStateSupplier; - private final Function mappingSupplier; + private final Function mappingSupplier; public CoordinatorRewriteContextProvider( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, Supplier clusterStateSupplier, - Function mappingSupplier + Function mappingSupplier ) { this.parserConfig = parserConfig; this.client = client; @@ -50,33 +49,18 @@ public CoordinatorRewriteContext getCoordinatorRewriteContext(Index index) { if (indexMetadata == null) { return null; } - - DateFieldRangeInfo dateFieldRangeInfo = mappingSupplier.apply(index); - if (dateFieldRangeInfo == null) { + DateFieldMapper.DateFieldType dateFieldType = mappingSupplier.apply(index); + if (dateFieldType == null) { return null; } - - DateFieldMapper.DateFieldType timestampFieldType = dateFieldRangeInfo.getTimestampFieldType(); - DateFieldMapper.DateFieldType eventIngestedFieldType = dateFieldRangeInfo.getEventIngestedFieldType(); IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); - IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); - if (timestampRange.containsAllShardRanges() == false) { - // if @timestamp range is not present or not ready in cluster state, fallback to using time series range (if present) - timestampRange = indexMetadata.getTimeSeriesTimestampRange(timestampFieldType); - // if timestampRange in the time series is null AND the eventIngestedRange is not ready for use, return null (no coord rewrite) - if (timestampRange == null && eventIngestedRange.containsAllShardRanges() == false) { + timestampRange = indexMetadata.getTimeSeriesTimestampRange(dateFieldType); + if (timestampRange == null) { return null; } } - // the DateFieldRangeInfo from the mappingSupplier only has field types, but not ranges - // so create a new object with ranges pulled from cluster state - return new CoordinatorRewriteContext( - parserConfig, - client, - nowInMillis, - new DateFieldRangeInfo(timestampFieldType, timestampRange, eventIngestedFieldType, eventIngestedRange) - ); + return new CoordinatorRewriteContext(parserConfig, client, nowInMillis, timestampRange, dateFieldType); } } diff --git 
a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java
index ac7fae8ec0145..4d2a6d3eaecdb 100644
--- a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java
@@ -436,11 +436,11 @@ public String getWriteableName() {
     protected MappedFieldType.Relation getRelation(final CoordinatorRewriteContext coordinatorRewriteContext) {
         final MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(fieldName);
         if (fieldType instanceof final DateFieldMapper.DateFieldType dateFieldType) {
-            if (coordinatorRewriteContext.hasTimestampData(fieldName) == false) {
+            if (coordinatorRewriteContext.hasTimestampData() == false) {
                 return MappedFieldType.Relation.DISJOINT;
             }
-            long minTimestamp = coordinatorRewriteContext.getMinTimestamp(fieldName);
-            long maxTimestamp = coordinatorRewriteContext.getMaxTimestamp(fieldName);
+            long minTimestamp = coordinatorRewriteContext.getMinTimestamp();
+            long maxTimestamp = coordinatorRewriteContext.getMaxTimestamp();
             DateMathParser dateMathParser = getForceDateParser();
             return dateFieldType.isFieldWithinQuery(
                 minTimestamp,
diff --git a/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java b/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java
deleted file mode 100644
index ddeb3f370be12..0000000000000
--- a/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-
-package org.elasticsearch.indices;
-
-import org.elasticsearch.index.mapper.DateFieldMapper;
-import org.elasticsearch.index.shard.IndexLongFieldRange;
-
-/**
- * Data holder of timestamp fields held in cluster state IndexMetadata.
- */ -public final class DateFieldRangeInfo { - - private final DateFieldMapper.DateFieldType timestampFieldType; - private final IndexLongFieldRange timestampRange; - private final DateFieldMapper.DateFieldType eventIngestedFieldType; - private final IndexLongFieldRange eventIngestedRange; - - public DateFieldRangeInfo( - DateFieldMapper.DateFieldType timestampFieldType, - IndexLongFieldRange timestampRange, - DateFieldMapper.DateFieldType eventIngestedFieldType, - IndexLongFieldRange eventIngestedRange - ) { - this.timestampFieldType = timestampFieldType; - this.timestampRange = timestampRange; - this.eventIngestedFieldType = eventIngestedFieldType; - this.eventIngestedRange = eventIngestedRange; - } - - public DateFieldMapper.DateFieldType getTimestampFieldType() { - return timestampFieldType; - } - - public IndexLongFieldRange getTimestampRange() { - return timestampRange; - } - - public DateFieldMapper.DateFieldType getEventIngestedFieldType() { - return eventIngestedFieldType; - } - - public IndexLongFieldRange getEventIngestedRange() { - return eventIngestedRange; - } -} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 203d7d5a0aba8..0d81d24e64646 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -98,6 +98,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; @@ -1763,13 +1764,7 @@ public DataRewriteContext getDataRewriteContext(LongSupplier nowInMillis) { } public CoordinatorRewriteContextProvider getCoordinatorRewriteContextProvider(LongSupplier nowInMillis) { - return new CoordinatorRewriteContextProvider( - parserConfig, - client, - nowInMillis, - clusterService::state, - this::getTimestampFieldTypeInfo - ); + return new CoordinatorRewriteContextProvider(parserConfig, client, nowInMillis, clusterService::state, this::getTimestampFieldType); } /** @@ -1859,16 +1854,14 @@ public boolean allPendingDanglingIndicesWritten() { } /** - * @return DateFieldRangeInfo holding the field types of the {@code @timestamp} and {@code event.ingested} fields of the index. - * or {@code null} if: + * @return the field type of the {@code @timestamp} field of the given index, or {@code null} if: * - the index is not found, * - the field is not found, or - * - the mapping is not known yet, or - * - the index does not have a useful timestamp field. + * - the field is not a timestamp field. 
*/ @Nullable - public DateFieldRangeInfo getTimestampFieldTypeInfo(Index index) { - return timestampFieldMapperService.getTimestampFieldTypeMap(index); + public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { + return timestampFieldMapperService.getTimestampFieldType(index); } public IndexScopedSettings getIndexScopedSettings() { diff --git a/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java b/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java index 9b23762e29490..4caeaef6514e5 100644 --- a/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java +++ b/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java @@ -42,9 +42,8 @@ import static org.elasticsearch.core.Strings.format; /** - * Tracks the mapping of the '@timestamp' and 'event.ingested' fields of immutable indices that expose their timestamp range in their - * index metadata. Coordinating nodes do not have (easy) access to mappings for all indices, so we extract the type of these two fields - * from the mapping here, since timestamp fields can have millis or nanos level resolution. + * Tracks the mapping of the {@code @timestamp} field of immutable indices that expose their timestamp range in their index metadata. + * Coordinating nodes do not have (easy) access to mappings for all indices, so we extract the type of this one field from the mapping here. */ public class TimestampFieldMapperService extends AbstractLifecycleComponent implements ClusterStateApplier { @@ -54,12 +53,10 @@ public class TimestampFieldMapperService extends AbstractLifecycleComponent impl private final ExecutorService executor; // single thread to construct mapper services async as needed /** - * The type of the 'event.ingested' and/or '@timestamp' fields keyed by index. - * The inner map is keyed by field name ('@timestamp' or 'event.ingested'). - * Futures may be completed with {@code null} to indicate that there is - * no usable timestamp field. + * The type of the {@code @timestamp} field keyed by index. Futures may be completed with {@code null} to indicate that there is + * no usable {@code @timestamp} field. 
  */
-    private final Map<Index, PlainActionFuture<DateFieldRangeInfo>> fieldTypesByIndex = ConcurrentCollections.newConcurrentMap();
+    private final Map<Index, PlainActionFuture<DateFieldMapper.DateFieldType>> fieldTypesByIndex = ConcurrentCollections.newConcurrentMap();
 
     public TimestampFieldMapperService(Settings settings, ThreadPool threadPool, IndicesService indicesService) {
         this.indicesService = indicesService;
@@ -105,8 +102,8 @@ public void applyClusterState(ClusterChangedEvent event) {
         final Index index = indexMetadata.getIndex();
 
         if (hasUsefulTimestampField(indexMetadata) && fieldTypesByIndex.containsKey(index) == false) {
-            logger.trace("computing timestamp mapping(s) for {}", index);
-            final PlainActionFuture<DateFieldRangeInfo> future = new PlainActionFuture<>();
+            logger.trace("computing timestamp mapping for {}", index);
+            final PlainActionFuture<DateFieldMapper.DateFieldType> future = new PlainActionFuture<>();
             fieldTypesByIndex.put(index, future);
 
             final IndexService indexService = indicesService.indexService(index);
@@ -151,45 +148,29 @@ private static boolean hasUsefulTimestampField(IndexMetadata indexMetadata) {
             return true;
         }
 
-        IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange();
-        if (timestampRange.isComplete() && timestampRange != IndexLongFieldRange.UNKNOWN) {
-            return true;
-        }
-
-        IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange();
-        return eventIngestedRange.isComplete() && eventIngestedRange != IndexLongFieldRange.UNKNOWN;
+        final IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange();
+        return timestampRange.isComplete() && timestampRange != IndexLongFieldRange.UNKNOWN;
     }
 
-    private static DateFieldRangeInfo fromMapperService(MapperService mapperService) {
-        DateFieldMapper.DateFieldType timestampFieldType = null;
-        DateFieldMapper.DateFieldType eventIngestedFieldType = null;
-
-        MappedFieldType mappedFieldType = mapperService.fieldType(DataStream.TIMESTAMP_FIELD_NAME);
-        if (mappedFieldType instanceof DateFieldMapper.DateFieldType dateFieldType) {
-            timestampFieldType = dateFieldType;
-        }
-        mappedFieldType = mapperService.fieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME);
-        if (mappedFieldType instanceof DateFieldMapper.DateFieldType dateFieldType) {
-            eventIngestedFieldType = dateFieldType;
-        }
-        if (timestampFieldType == null && eventIngestedFieldType == null) {
+    private static DateFieldMapper.DateFieldType fromMapperService(MapperService mapperService) {
+        final MappedFieldType mappedFieldType = mapperService.fieldType(DataStream.TIMESTAMP_FIELD_NAME);
+        if (mappedFieldType instanceof DateFieldMapper.DateFieldType) {
+            return (DateFieldMapper.DateFieldType) mappedFieldType;
+        } else {
             return null;
         }
-        // the mapper only fills in the field types, not the actual range values
-        return new DateFieldRangeInfo(timestampFieldType, null, eventIngestedFieldType, null);
     }
 
     /**
-     * @return DateFieldRangeInfo holding the field types of the {@code @timestamp} and {@code event.ingested} fields of the index.
-     * or {@code null} if:
+     * @return the field type of the {@code @timestamp} field of the given index, or {@code null} if:
      * - the index is not found,
      * - the field is not found,
      * - the mapping is not known yet, or
-     * - the index does not have a useful timestamp field.
+     * - the field is not a timestamp field.
*/ @Nullable - public DateFieldRangeInfo getTimestampFieldTypeMap(Index index) { - final PlainActionFuture future = fieldTypesByIndex.get(index); + public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { + final PlainActionFuture future = fieldTypesByIndex.get(index); if (future == null || future.isDone() == false) { return null; } @@ -200,4 +181,5 @@ public DateFieldRangeInfo getTimestampFieldTypeMap(Index index) { throw new UncategorizedExecutionException("An error occurred fetching timestamp field type for " + index, e); } } + } diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index e61d86bbf2a58..70c4d73f578b3 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.action.search; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.CanMatchNodeResponse.ResponseOrFailure; @@ -27,6 +26,8 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -37,7 +38,6 @@ import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; -import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.search.CanMatchShardResponse; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder; @@ -72,7 +72,6 @@ import static org.elasticsearch.action.search.SearchAsyncActionTests.getShardsIter; import static org.elasticsearch.core.Types.forciblyCast; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Mockito.mock; @@ -465,17 +464,7 @@ public void sendCanMatch( } } - // test using @timestamp - public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingTimestamp() throws Exception { - doCanMatchFilteringOnCoordinatorThatCanBeSkipped(DataStream.TIMESTAMP_FIELD_NAME); - } - - // test using event.ingested - public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingEventIngested() throws Exception { - doCanMatchFilteringOnCoordinatorThatCanBeSkipped(IndexMetadata.EVENT_INGESTED_FIELD_NAME); - } - - public void doCanMatchFilteringOnCoordinatorThatCanBeSkipped(String timestampField) throws Exception { + public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exception { Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); @@ -486,10 +475,15 @@ public void doCanMatchFilteringOnCoordinatorThatCanBeSkipped(String 
timestampFie long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timestampField, indexMinTimestamp, indexMaxTimestamp); + contextProviderBuilder.addIndexMinMaxTimestamps( + dataStreamIndex, + DataStream.TIMESTAMP_FIELD_NAME, + indexMinTimestamp, + indexMaxTimestamp + ); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timestampField); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); // We query a range outside of the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); @@ -541,90 +535,6 @@ public void doCanMatchFilteringOnCoordinatorThatCanBeSkipped(String timestampFie ); } - public void testCoordinatorCanMatchFilteringThatCanBeSkippedUsingBothTimestamps() throws Exception { - Index dataStreamIndex1 = new Index(".ds-twoTimestamps0001", UUIDs.base64UUID()); - Index dataStreamIndex2 = new Index(".ds-twoTimestamps0002", UUIDs.base64UUID()); - DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); - - List regularIndices = randomList(1, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); - - long indexMinTimestamp = randomLongBetween(0, 5000); - long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); - StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - for (Index dataStreamIndex : dataStream.getIndices()) { - // use same range for both @timestamp and event.ingested - contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested( - dataStreamIndex, - indexMinTimestamp, - indexMaxTimestamp, - indexMinTimestamp, - indexMaxTimestamp - ); - } - - /** - * Expected behavior: if either @timestamp or 'event.ingested' filters in the query are "out of range" (do not - * overlap the range in cluster state), then all shards in the datastream should be skipped. - * Only if both @timestamp or 'event.ingested' filters are "in range" should the data stream shards be searched - */ - boolean timestampQueryOutOfRange = randomBoolean(); - boolean eventIngestedQueryOutOfRange = randomBoolean(); - int timestampOffset = timestampQueryOutOfRange ? 1 : -500; - int eventIngestedOffset = eventIngestedQueryOutOfRange ? 
1 : -500; - - RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); - tsRangeQueryBuilder.from(indexMaxTimestamp + timestampOffset).to(indexMaxTimestamp + 2); - - RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME); - eventIngestedRangeQueryBuilder.from(indexMaxTimestamp + eventIngestedOffset).to(indexMaxTimestamp + 2); - - BoolQueryBuilder queryBuilder = new BoolQueryBuilder().filter(tsRangeQueryBuilder).filter(eventIngestedRangeQueryBuilder); - - if (randomBoolean()) { - // Add an additional filter that cannot be evaluated in the coordinator but shouldn't - // affect the end result as we're filtering - queryBuilder.filter(new TermQueryBuilder("fake", "value")); - } - - assignShardsAndExecuteCanMatchPhase( - List.of(dataStream), - regularIndices, - contextProviderBuilder.build(), - queryBuilder, - List.of(), - null, - (updatedSearchShardIterators, requests) -> { - List skippedShards = updatedSearchShardIterators.stream().filter(SearchShardIterator::skip).toList(); - List nonSkippedShards = updatedSearchShardIterators.stream() - .filter(searchShardIterator -> searchShardIterator.skip() == false) - .toList(); - - if (timestampQueryOutOfRange || eventIngestedQueryOutOfRange) { - // data stream shards should have been skipped - assertThat(skippedShards.size(), greaterThan(0)); - boolean allSkippedShardAreFromDataStream = skippedShards.stream() - .allMatch(shardIterator -> dataStream.getIndices().contains(shardIterator.shardId().getIndex())); - assertThat(allSkippedShardAreFromDataStream, equalTo(true)); - - boolean allNonSkippedShardsAreFromRegularIndices = nonSkippedShards.stream() - .allMatch(shardIterator -> regularIndices.contains(shardIterator.shardId().getIndex())); - assertThat(allNonSkippedShardsAreFromRegularIndices, equalTo(true)); - - boolean allRequestsWereTriggeredAgainstRegularIndices = requests.stream() - .allMatch(request -> regularIndices.contains(request.shardId().getIndex())); - assertThat(allRequestsWereTriggeredAgainstRegularIndices, equalTo(true)); - - } else { - assertThat(skippedShards.size(), equalTo(0)); - long countSkippedShardsFromDatastream = nonSkippedShards.stream() - .filter(iter -> dataStream.getIndices().contains(iter.shardId().getIndex())) - .count(); - assertThat(countSkippedShardsFromDatastream, greaterThan(0L)); - } - } - ); - } - public void testCanMatchFilteringOnCoordinatorParsingFails() throws Exception { Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); @@ -632,16 +542,19 @@ public void testCanMatchFilteringOnCoordinatorParsingFails() throws Exception { List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); - String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); - long indexMinTimestamp = randomLongBetween(0, 5000); long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timeField, indexMinTimestamp, indexMaxTimestamp); + contextProviderBuilder.addIndexMinMaxTimestamps( + dataStreamIndex, + DataStream.TIMESTAMP_FIELD_NAME, + indexMinTimestamp, + indexMaxTimestamp + ); } - RangeQueryBuilder 
rangeQueryBuilder = new RangeQueryBuilder(timeField); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); // Query with a non default date format rangeQueryBuilder.from("2020-1-01").to("2021-1-01"); @@ -672,20 +585,23 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); - String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); - long indexMinTimestamp = 10; long indexMaxTimestamp = 20; StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timeField, indexMinTimestamp, indexMaxTimestamp); + contextProviderBuilder.addIndexMinMaxTimestamps( + dataStreamIndex, + DataStream.TIMESTAMP_FIELD_NAME, + indexMinTimestamp, + indexMaxTimestamp + ); } BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); // Query inside of the data stream index range if (randomBoolean()) { // Query generation - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); // We query a range within the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMinTimestamp).to(indexMaxTimestamp); @@ -698,7 +614,8 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep } } else { // We query a range outside of the timestamp range covered by both datastream indices - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField).from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(indexMaxTimestamp + 1) + .to(indexMaxTimestamp + 2); TermQueryBuilder termQueryBuilder = new TermQueryBuilder("fake", "value"); @@ -718,86 +635,17 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep ); } - public void testCanMatchFilteringOnCoordinatorWithTimestampAndEventIngestedThatCanNotBeSkipped() throws Exception { - // Generate indices - Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); - Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); - DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); - - List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); - - long indexMinTimestampForTs = 10; - long indexMaxTimestampForTs = 20; - long indexMinTimestampForEventIngested = 10; - long indexMaxTimestampForEventIngested = 20; - StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested( - dataStreamIndex, - indexMinTimestampForTs, - indexMaxTimestampForTs, - indexMinTimestampForEventIngested, - indexMaxTimestampForEventIngested - ); - } - - BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); - // Query inside of the data stream index range - if (randomBoolean()) { - // Query generation - // We query a range within both timestamp ranges covered by both datastream indices - RangeQueryBuilder 
tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); - tsRangeQueryBuilder.from(indexMinTimestampForTs).to(indexMaxTimestampForTs); - - RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME); - eventIngestedRangeQueryBuilder.from(indexMinTimestampForEventIngested).to(indexMaxTimestampForEventIngested); - - queryBuilder.filter(tsRangeQueryBuilder).filter(eventIngestedRangeQueryBuilder); - - if (randomBoolean()) { - // Add an additional filter that cannot be evaluated in the coordinator but shouldn't - // affect the end result as we're filtering - queryBuilder.filter(new TermQueryBuilder("fake", "value")); - } - } else { - // We query a range outside of the both ranges covered by both datastream indices - RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(indexMaxTimestampForTs + 1) - .to(indexMaxTimestampForTs + 2); - RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME).from( - indexMaxTimestampForEventIngested + 1 - ).to(indexMaxTimestampForEventIngested + 2); - - TermQueryBuilder termQueryBuilder = new TermQueryBuilder("fake", "value"); - - // This is always evaluated as true in the coordinator as we cannot determine there if - // the term query clause is false. - queryBuilder.should(tsRangeQueryBuilder).should(eventIngestedRangeQueryBuilder).should(termQueryBuilder); - } - - assignShardsAndExecuteCanMatchPhase( - List.of(dataStream), - regularIndices, - contextProviderBuilder.build(), - queryBuilder, - List.of(), - null, - this::assertAllShardsAreQueried - ); - } - public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withDefaultBackgroundFilter() throws Exception { Index index1 = new Index("index1", UUIDs.base64UUID()); Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); - String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); - StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, timeField, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, timeField, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, timeField, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timeField).from(2100).to(2200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2100).to(2200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms"); assignShardsAndExecuteCanMatchPhase( @@ -813,22 +661,20 @@ public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_w } public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withBackgroundFilter() throws Exception { - String timestampField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); - Index index1 = new Index("index1", UUIDs.base64UUID()); Index index2 = new 
Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); Index index4 = new Index("index4", UUIDs.base64UUID()); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, timestampField, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, timestampField, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, timestampField, 2000, 2999); - contextProviderBuilder.addIndexMinMaxTimestamps(index4, timestampField, 3000, 3999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index4, DataStream.TIMESTAMP_FIELD_NAME, 3000, 3999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timestampField).from(3100).to(3200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(3100).to(3200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(timestampField).from(0).to(1999) + new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(0).to(1999) ); assignShardsAndExecuteCanMatchPhase( @@ -857,53 +703,14 @@ public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_w Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); - String timestampField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); - StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, timestampField, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, timestampField, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, timestampField, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timestampField).from(2100).to(2200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2100).to(2200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(timestampField).from(2000).to(2300) - ); - SuggestBuilder suggest = new SuggestBuilder().setGlobalText("test"); - - assignShardsAndExecuteCanMatchPhase( - List.of(), - List.of(index1, index2, index3), - contextProviderBuilder.build(), - query, - List.of(aggregation), - suggest, - // The query and aggregation and match only index3, but suggest should match everything. 
- this::assertAllShardsAreQueried - ); - } - - public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withSuggest_withTwoTimestamps() throws Exception { - Index index1 = new Index("index1", UUIDs.base64UUID()); - Index index2 = new Index("index2", UUIDs.base64UUID()); - Index index3 = new Index("index3", UUIDs.base64UUID()); - - StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index1, 0, 999, 0, 999); - contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index2, 1000, 1999, 1000, 1999); - contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index3, 2000, 2999, 2000, 2999); - - String fieldInRange = IndexMetadata.EVENT_INGESTED_FIELD_NAME; - String fieldOutOfRange = DataStream.TIMESTAMP_FIELD_NAME; - - if (randomBoolean()) { - fieldInRange = DataStream.TIMESTAMP_FIELD_NAME; - fieldOutOfRange = IndexMetadata.EVENT_INGESTED_FIELD_NAME; - } - - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(fieldInRange).from(2100).to(2200)) - .filter(new RangeQueryBuilder(fieldOutOfRange).from(8888).to(9999)); - AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(fieldInRange).from(2000).to(2300) + new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2000).to(2300) ); SuggestBuilder suggest = new SuggestBuilder().setGlobalText("test"); @@ -937,13 +744,13 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedTsdb() throws Exce long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index index : dataStream1.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps(index, DataStream.TIMESTAMP_FIELD_NAME, indexMinTimestamp, indexMaxTimestamp); + contextProviderBuilder.addIndexMinMaxTimestamps(index, indexMinTimestamp, indexMaxTimestamp); } for (Index index : dataStream2.getIndices()) { contextProviderBuilder.addIndex(index); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp"); // We query a range outside of the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); @@ -1147,9 +954,9 @@ public void sendCanMatch( canMatchResultsConsumer.accept(updatedSearchShardIterators, requests); } - static class StaticCoordinatorRewriteContextProviderBuilder { + private static class StaticCoordinatorRewriteContextProviderBuilder { private ClusterState clusterState = ClusterState.EMPTY_STATE; - private final Map fields = new HashMap<>(); + private final Map fields = new HashMap<>(); private void addIndexMinMaxTimestamps(Index index, String fieldName, long minTimeStamp, long maxTimestamp) { if (clusterState.metadata().index(index) != null) { @@ -1167,64 +974,35 @@ private void addIndexMinMaxTimestamps(Index index, String fieldName, long minTim IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) .settings(indexSettings) .numberOfShards(1) - .numberOfReplicas(0); - if (fieldName.equals(DataStream.TIMESTAMP_FIELD_NAME)) { - indexMetadataBuilder.timestampRange(timestampRange); - fields.put(index, new DateFieldRangeInfo(new 
DateFieldMapper.DateFieldType(fieldName), null, null, null)); - } else if (fieldName.equals(IndexMetadata.EVENT_INGESTED_FIELD_NAME)) { - indexMetadataBuilder.eventIngestedRange(timestampRange, TransportVersion.current()); - fields.put(index, new DateFieldRangeInfo(null, null, new DateFieldMapper.DateFieldType(fieldName), null)); - } + .numberOfReplicas(0) + .timestampRange(timestampRange); Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); + clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); + + fields.put(index, new DateFieldMapper.DateFieldType(fieldName)); } - /** - * Add min/max timestamps to IndexMetadata for the specified index for both @timestamp and 'event.ingested' - */ - private void addIndexMinMaxForTimestampAndEventIngested( - Index index, - long minTimestampForTs, - long maxTimestampForTs, - long minTimestampForEventIngested, - long maxTimestampForEventIngested - ) { + private void addIndexMinMaxTimestamps(Index index, long minTimestamp, long maxTimestamp) { if (clusterState.metadata().index(index) != null) { throw new IllegalArgumentException("Min/Max timestamps for " + index + " were already defined"); } - IndexLongFieldRange tsTimestampRange = IndexLongFieldRange.NO_SHARDS.extendWithShardRange( - 0, - 1, - ShardLongFieldRange.of(minTimestampForTs, maxTimestampForTs) - ); - IndexLongFieldRange eventIngestedTimestampRange = IndexLongFieldRange.NO_SHARDS.extendWithShardRange( - 0, - 1, - ShardLongFieldRange.of(minTimestampForEventIngested, maxTimestampForEventIngested) - ); - - Settings.Builder indexSettings = settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder indexSettings = settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "a_field") + .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(minTimestamp)) + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(maxTimestamp)); IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) .settings(indexSettings) .numberOfShards(1) - .numberOfReplicas(0) - .timestampRange(tsTimestampRange) - .eventIngestedRange(eventIngestedTimestampRange, TransportVersion.current()); + .numberOfReplicas(0); Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - fields.put( - index, - new DateFieldRangeInfo( - new DateFieldMapper.DateFieldType(DataStream.TIMESTAMP_FIELD_NAME), - null, - new DateFieldMapper.DateFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), - null - ) - ); + fields.put(index, new DateFieldMapper.DateFieldType("@timestamp")); } private void addIndex(Index index) { @@ -1240,7 +1018,7 @@ private void addIndex(Index index) { Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - fields.put(index, new DateFieldRangeInfo(new DateFieldMapper.DateFieldType(DataStream.TIMESTAMP_FIELD_NAME), null, null, null)); + fields.put(index, new DateFieldMapper.DateFieldType("@timestamp")); } public CoordinatorRewriteContextProvider build() { diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index a2d93bab3a505..271df2a971fb1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -59,7 +59,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -623,13 +622,13 @@ QueryRewriteContext createQueryRewriteContext() { } CoordinatorRewriteContext createCoordinatorContext(DateFieldMapper.DateFieldType dateFieldType, long min, long max) { - DateFieldRangeInfo timestampFieldInfo = new DateFieldRangeInfo( - dateFieldType, + return new CoordinatorRewriteContext( + parserConfiguration, + this.client, + () -> nowInMillis, IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)), - dateFieldType, - IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)) + dateFieldType ); - return new CoordinatorRewriteContext(parserConfiguration, this.client, () -> nowInMillis, timestampFieldInfo); } DataRewriteContext createDataContext() { diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java index 6d962ec5baceb..36d4751423113 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; -import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; @@ -45,7 +44,6 @@ import java.time.Instant; import java.util.Collection; import java.util.List; -import java.util.Map; import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING; @@ -78,15 +76,8 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx createIndex("index", 1, 1); - String timestampVal = "2010-01-06T02:03:04.567Z"; - String eventIngestedVal = "2010-01-06T02:03:05.567Z"; // one second later - - final DocWriteResponse indexResponse = prepareIndex("index").setSource( - DataStream.TIMESTAMP_FIELD_NAME, - timestampVal, - IndexMetadata.EVENT_INGESTED_FIELD_NAME, - eventIngestedVal - ).get(); + final DocWriteResponse indexResponse = prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567Z") + .get(); ensureGreen("index"); @@ -126,23 +117,13 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); 
assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); assertTrue(timestampFieldRange.isComplete()); - assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse(timestampVal).toEpochMilli())); - assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse(timestampVal).toEpochMilli())); + assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); + assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); - IndexLongFieldRange eventIngestedFieldRange = clusterAdmin().prepareState() - .get() - .getState() - .metadata() - .index("index") - .getEventIngestedRange(); - assertThat(eventIngestedFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); - assertThat(eventIngestedFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertTrue(eventIngestedFieldRange.isComplete()); - assertThat(eventIngestedFieldRange.getMin(), equalTo(Instant.parse(eventIngestedVal).toEpochMilli())); - assertThat(eventIngestedFieldRange.getMax(), equalTo(Instant.parse(eventIngestedVal).toEpochMilli())); + assertThat(indexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); } - public void testTimestampAndEventIngestedFieldTypeExposedByAllIndicesServices() throws Exception { + public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception { internalCluster().startNodes(between(2, 4)); final String locale; @@ -200,11 +181,11 @@ public void testTimestampAndEventIngestedFieldTypeExposedByAllIndicesServices() ensureGreen("index"); if (randomBoolean()) { - prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date, IndexMetadata.EVENT_INGESTED_FIELD_NAME, date).get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date).get(); } for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldTypeInfo(index)); + assertNull(indicesService.getTimestampFieldType(index)); } assertAcked( @@ -212,129 +193,15 @@ public void testTimestampAndEventIngestedFieldTypeExposedByAllIndicesServices() ); ensureGreen("index"); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - final PlainActionFuture> future = new PlainActionFuture<>(); + final PlainActionFuture timestampFieldTypeFuture = new PlainActionFuture<>(); assertBusy(() -> { - DateFieldRangeInfo timestampsFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(index); - DateFieldMapper.DateFieldType timestampFieldType = timestampsFieldTypeInfo.getTimestampFieldType(); - DateFieldMapper.DateFieldType eventIngestedFieldType = timestampsFieldTypeInfo.getEventIngestedFieldType(); - assertNotNull(eventIngestedFieldType); + final DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(index); assertNotNull(timestampFieldType); - future.onResponse( - Map.of( - DataStream.TIMESTAMP_FIELD_NAME, - timestampFieldType, - IndexMetadata.EVENT_INGESTED_FIELD_NAME, - eventIngestedFieldType - ) - ); - }); - assertTrue(future.isDone()); - assertThat(future.get().get(DataStream.TIMESTAMP_FIELD_NAME).dateTimeFormatter().locale().toString(), equalTo(locale)); - assertThat(future.get().get(DataStream.TIMESTAMP_FIELD_NAME).dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); - assertThat(future.get().get(IndexMetadata.EVENT_INGESTED_FIELD_NAME).dateTimeFormatter().locale().toString(), equalTo(locale)); - assertThat( - 
future.get().get(IndexMetadata.EVENT_INGESTED_FIELD_NAME).dateTimeFormatter().parseMillis(date), - equalTo(1580817683000L) - ); - } - - assertAcked( - client().execute( - FreezeIndexAction.INSTANCE, - new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index").setFreeze(false) - ).actionGet() - ); - ensureGreen("index"); - for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldTypeInfo(index)); - } - } - - public void testTimestampOrEventIngestedFieldTypeExposedByAllIndicesServices() throws Exception { - internalCluster().startNodes(between(2, 4)); - - final String locale; - final String date; - - switch (between(1, 3)) { - case 1 -> { - locale = ""; - date = "04 Feb 2020 12:01:23Z"; - } - case 2 -> { - locale = "en_GB"; - date = "04 Feb 2020 12:01:23Z"; - } - case 3 -> { - locale = "fr_FR"; - date = "04 févr. 2020 12:01:23Z"; - } - default -> throw new AssertionError("impossible"); - } - - String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); - assertAcked( - prepareCreate("index").setSettings( - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - ) - .setMapping( - jsonBuilder().startObject() - .startObject("_doc") - .startObject("properties") - .startObject(timeField) - .field("type", "date") - .field("format", "dd LLL yyyy HH:mm:ssX") - .field("locale", locale) - .endObject() - .endObject() - .endObject() - .endObject() - ) - ); - - final Index index = clusterAdmin().prepareState() - .clear() - .setIndices("index") - .setMetadata(true) - .get() - .getState() - .metadata() - .index("index") - .getIndex(); - - ensureGreen("index"); - if (randomBoolean()) { - prepareIndex("index").setSource(timeField, date).get(); - } - - for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldTypeInfo(index)); - } - - assertAcked( - client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() - ); - ensureGreen("index"); - for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - // final PlainActionFuture timestampFieldTypeFuture = new PlainActionFuture<>(); - final PlainActionFuture> future = new PlainActionFuture<>(); - assertBusy(() -> { - DateFieldRangeInfo timestampsFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(index); - DateFieldMapper.DateFieldType timestampFieldType = timestampsFieldTypeInfo.getTimestampFieldType(); - DateFieldMapper.DateFieldType eventIngestedFieldType = timestampsFieldTypeInfo.getEventIngestedFieldType(); - if (timeField == DataStream.TIMESTAMP_FIELD_NAME) { - assertNotNull(timestampFieldType); - assertNull(eventIngestedFieldType); - future.onResponse(Map.of(timeField, timestampFieldType)); - } else { - assertNull(timestampFieldType); - assertNotNull(eventIngestedFieldType); - future.onResponse(Map.of(timeField, eventIngestedFieldType)); - } + timestampFieldTypeFuture.onResponse(timestampFieldType); }); - assertTrue(future.isDone()); - assertThat(future.get().get(timeField).dateTimeFormatter().locale().toString(), equalTo(locale)); - assertThat(future.get().get(timeField).dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); + assertTrue(timestampFieldTypeFuture.isDone()); + 
assertThat(timestampFieldTypeFuture.get().dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat(timestampFieldTypeFuture.get().dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); } assertAcked( @@ -345,7 +212,7 @@ public void testTimestampOrEventIngestedFieldTypeExposedByAllIndicesServices() t ); ensureGreen("index"); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldTypeInfo(index)); + assertNull(indicesService.getTimestampFieldType(index)); } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index 6dfe1c5835285..5204bdfcc78e6 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; -import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; @@ -101,11 +100,11 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestampAndEventIngested(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); + createIndexWithTimestamp(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestampAndEventIngested( + createIndexWithTimestamp( indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder() @@ -118,10 +117,11 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying // Either add data outside of the range, or documents that don't have timestamp data final boolean indexDataWithTimestamp = randomBoolean(); // Add enough documents to have non-metadata segment files in all shards, - // otherwise the mount operation might go through as the read won't be blocked + // otherwise the mount operation might go through as the read won't be + // blocked final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000); if (indexDataWithTimestamp) { - indexDocumentsWithTimestampAndEventIngestedDates( + indexDocumentsWithTimestampWithinDate( indexOutsideSearchRange, numberOfDocsInIndexOutsideSearchRange, TIMESTAMP_TEMPLATE_OUTSIDE_RANGE @@ -132,7 +132,7 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying // Index enough documents to ensure that all shards have at least some documents int numDocsWithinRange = between(100, 1000); - indexDocumentsWithTimestampAndEventIngestedDates(indexWithinSearchRange, 
numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE); + indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -166,10 +166,9 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); - assertThat(timestampFieldTypeInfo, nullValue()); + DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); + assertThat(timestampFieldType, nullValue()); final boolean includeIndexCoveringSearchRangeInSearchRequest = randomBoolean(); List indicesToSearch = new ArrayList<>(); @@ -177,9 +176,7 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying indicesToSearch.add(indexWithinSearchRange); } indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange); - - String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timeField) + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -253,44 +250,20 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying ensureGreen(searchableSnapshotIndexOutsideSearchRange); final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); - - // check that @timestamp and 'event.ingested' are now in cluster state final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); + final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); + assertThat(dateFieldType, notNullValue()); + final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); - final IndexLongFieldRange updatedEventIngestedRange = updatedIndexMetadata.getEventIngestedRange(); - assertThat(updatedEventIngestedRange.isComplete(), equalTo(true)); - - timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); - final DateFieldMapper.DateFieldType timestampDataFieldType = timestampFieldTypeInfo.getTimestampFieldType(); - assertThat(timestampDataFieldType, notNullValue()); - final DateFieldMapper.DateFieldType eventIngestedDataFieldType = timestampFieldTypeInfo.getEventIngestedFieldType(); - assertThat(eventIngestedDataFieldType, notNullValue()); - - final DateFieldMapper.Resolution timestampResolution = timestampDataFieldType.resolution(); - final DateFieldMapper.Resolution eventIngestedResolution = eventIngestedDataFieldType.resolution(); if (indexDataWithTimestamp) { assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); assertThat( updatedTimestampMillisRange.getMin(), - greaterThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) - ); - assertThat( - 
updatedTimestampMillisRange.getMax(), - lessThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) - ); - - assertThat(updatedEventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat( - updatedEventIngestedRange.getMin(), - greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) - ); - assertThat( - updatedEventIngestedRange.getMax(), - lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) ); + assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); } else { assertThat(updatedTimestampMillisRange, sameInstance(IndexLongFieldRange.EMPTY)); - assertThat(updatedEventIngestedRange, sameInstance(IndexLongFieldRange.EMPTY)); } // Stop the node holding the searchable snapshots, and since we defined @@ -410,171 +383,6 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } } - /** - * Test shard skipping when only 'event.ingested' is in the index and cluster state. - */ - public void testEventIngestedRangeInSearchAgainstSearchableSnapshotShards() throws Exception { - internalCluster().startMasterOnlyNode(); - internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); - final String dataNodeHoldingRegularIndex = internalCluster().startDataOnlyNode(); - final String dataNodeHoldingSearchableSnapshot = internalCluster().startDataOnlyNode(); - final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNodeHoldingSearchableSnapshot); - - final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - - final String timestampField = IndexMetadata.EVENT_INGESTED_FIELD_NAME; - - createIndexWithOnlyOneTimestampField(timestampField, indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); - - final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithOnlyOneTimestampField( - timestampField, - indexWithinSearchRange, - indexWithinSearchRangeShardCount, - Settings.builder() - .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex) - .build() - ); - - final int totalShards = indexOutsideSearchRangeShardCount + indexWithinSearchRangeShardCount; - - // Add enough documents to have non-metadata segment files in all shards, - // otherwise the mount operation might go through as the read won't be blocked - final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000); - - indexDocumentsWithOnlyOneTimestampField( - timestampField, - indexOutsideSearchRange, - numberOfDocsInIndexOutsideSearchRange, - TIMESTAMP_TEMPLATE_OUTSIDE_RANGE - ); - - // Index enough documents to ensure that all shards have at least some documents - int numDocsWithinRange = between(100, 1000); - indexDocumentsWithOnlyOneTimestampField( - timestampField, - indexWithinSearchRange, - numDocsWithinRange, - TIMESTAMP_TEMPLATE_WITHIN_RANGE - ); - - final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - createRepository(repositoryName, "mock"); - - final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indexOutsideSearchRange)).snapshotId(); - 
assertAcked(indicesAdmin().prepareDelete(indexOutsideSearchRange)); - - final String searchableSnapshotIndexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - - // Block the repository for the node holding the searchable snapshot shards - // to delay its restore - blockDataNode(repositoryName, dataNodeHoldingSearchableSnapshot); - - // Force the searchable snapshot to be allocated in a particular node - Settings restoredIndexSettings = Settings.builder() - .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingSearchableSnapshot) - .build(); - - final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest( - TEST_REQUEST_TIMEOUT, - searchableSnapshotIndexOutsideSearchRange, - repositoryName, - snapshotId.getName(), - indexOutsideSearchRange, - restoredIndexSettings, - Strings.EMPTY_ARRAY, - false, - randomFrom(MountSearchableSnapshotRequest.Storage.values()) - ); - client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet(); - - final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); - assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - - // Allow the searchable snapshots to be finally mounted - unblockNode(repositoryName, dataNodeHoldingSearchableSnapshot); - waitUntilRecoveryIsDone(searchableSnapshotIndexOutsideSearchRange); - ensureGreen(searchableSnapshotIndexOutsideSearchRange); - - IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); - IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - IndexLongFieldRange updatedEventIngestedMillisRange = updatedIndexMetadata.getEventIngestedRange(); - - // @timestamp range should be null since it was not included in the index or indexed docs - assertThat(updatedTimestampMillisRange, equalTo(IndexLongFieldRange.UNKNOWN)); - assertThat(updatedEventIngestedMillisRange, not(equalTo(IndexLongFieldRange.UNKNOWN))); - - DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); - - DateFieldMapper.DateFieldType timestampDataFieldType = timestampFieldTypeInfo.getTimestampFieldType(); - assertThat(timestampDataFieldType, nullValue()); - - DateFieldMapper.DateFieldType eventIngestedFieldType = timestampFieldTypeInfo.getEventIngestedFieldType(); - assertThat(eventIngestedFieldType, notNullValue()); - - DateFieldMapper.Resolution eventIngestedResolution = eventIngestedFieldType.resolution(); - assertThat(updatedEventIngestedMillisRange.isComplete(), equalTo(true)); - assertThat( - updatedEventIngestedMillisRange.getMin(), - greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) - ); - assertThat( - updatedEventIngestedMillisRange.getMax(), - lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) - ); - - // now do a search against event.ingested - List indicesToSearch = new ArrayList<>(); - indicesToSearch.add(indexWithinSearchRange); - indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange); - - { - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) - .from("2020-11-28T00:00:00.000000000Z", true) - .to("2020-11-29T00:00:00.000000000Z"); - - SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new 
String[0])) - .source(new SearchSourceBuilder().query(rangeQuery)); - - assertResponse(client().search(request), searchResponse -> { - // All the regular index searches succeeded - assertThat(searchResponse.getSuccessfulShards(), equalTo(totalShards)); - assertThat(searchResponse.getFailedShards(), equalTo(0)); - // All the searchable snapshots shards were skipped - assertThat(searchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); - assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); - }); - - SearchShardAPIResult searchShardResult = doSearchShardAPIQuery(indicesToSearch, rangeQuery, true, totalShards); - assertThat(searchShardResult.skipped().size(), equalTo(indexOutsideSearchRangeShardCount)); - assertThat(searchShardResult.notSkipped().size(), equalTo(indexWithinSearchRangeShardCount)); - } - - // query a range that covers both indexes - all shards should be searched, none skipped - { - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) - .from("2019-11-28T00:00:00.000000000Z", true) - .to("2021-11-29T00:00:00.000000000Z"); - - SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) - .source(new SearchSourceBuilder().query(rangeQuery)); - - assertResponse(client().search(request), searchResponse -> { - assertThat(searchResponse.getSuccessfulShards(), equalTo(totalShards)); - assertThat(searchResponse.getFailedShards(), equalTo(0)); - assertThat(searchResponse.getSkippedShards(), equalTo(0)); - assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); - }); - - SearchShardAPIResult searchShardResult = doSearchShardAPIQuery(indicesToSearch, rangeQuery, true, totalShards); - assertThat(searchShardResult.skipped().size(), equalTo(0)); - assertThat(searchShardResult.notSkipped().size(), equalTo(totalShards)); - } - } - /** * Can match against searchable snapshots is tested via both the Search API and the SearchShards (transport-only) API. * The latter is a way to do only a can-match rather than all search phases. 
@@ -588,7 +396,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestampAndEventIngested( + createIndexWithTimestamp( indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.builder() @@ -596,7 +404,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() .build() ); - indexDocumentsWithTimestampAndEventIngestedDates(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE); + indexDocumentsWithTimestampWithinDate(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -630,14 +438,11 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); - assertThat(timestampFieldTypeInfo, nullValue()); + DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); + assertThat(timestampFieldType, nullValue()); - final String timestampField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); - - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -695,29 +500,14 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() ensureGreen(searchableSnapshotIndexOutsideSearchRange); final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); - timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); - assertThat(timestampFieldTypeInfo, notNullValue()); - - final IndexLongFieldRange updatedTimestampRange = updatedIndexMetadata.getTimestampRange(); - DateFieldMapper.Resolution tsResolution = timestampFieldTypeInfo.getTimestampFieldType().resolution(); - ; - assertThat(updatedTimestampRange.isComplete(), equalTo(true)); - assertThat(updatedTimestampRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat(updatedTimestampRange.getMin(), greaterThanOrEqualTo(tsResolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); - assertThat(updatedTimestampRange.getMax(), lessThanOrEqualTo(tsResolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); - - final IndexLongFieldRange updatedEventIngestedRange = updatedIndexMetadata.getEventIngestedRange(); - DateFieldMapper.Resolution eventIngestedResolution = timestampFieldTypeInfo.getEventIngestedFieldType().resolution(); - assertThat(updatedEventIngestedRange.isComplete(), equalTo(true)); - assertThat(updatedEventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat( - updatedEventIngestedRange.getMin(), - greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) - ); - assertThat( - 
updatedEventIngestedRange.getMax(), - lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) - ); + final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); + final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); + assertThat(dateFieldType, notNullValue()); + final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); + assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); + assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); + assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); // Stop the node holding the searchable snapshots, and since we defined // the index allocation criteria to require the searchable snapshot @@ -789,7 +579,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestampAndEventIngested( + createIndexWithTimestamp( indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder() @@ -797,7 +587,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo .build() ); - indexDocumentsWithTimestampAndEventIngestedDates(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE); + indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -831,13 +621,11 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); - assertThat(timestampFieldTypeInfo, nullValue()); + DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); + assertThat(timestampFieldType, nullValue()); - String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timeField) + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -892,32 +680,13 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange); final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); - assertThat(timestampFieldTypeInfo, notNullValue()); - final DateFieldMapper.Resolution timestampResolution = 
timestampFieldTypeInfo.getTimestampFieldType().resolution(); + final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); + assertThat(dateFieldType, notNullValue()); + final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat( - updatedTimestampMillisRange.getMin(), - greaterThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-28T00:00:00Z"))) - ); - assertThat( - updatedTimestampMillisRange.getMax(), - lessThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-29T00:00:00Z"))) - ); - - final IndexLongFieldRange updatedEventIngestedMillisRange = updatedIndexMetadata.getEventIngestedRange(); - final DateFieldMapper.Resolution eventIngestedResolution = timestampFieldTypeInfo.getEventIngestedFieldType().resolution(); - assertThat(updatedEventIngestedMillisRange.isComplete(), equalTo(true)); - assertThat(updatedEventIngestedMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat( - updatedEventIngestedMillisRange.getMin(), - greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-28T00:00:00Z"))) - ); - assertThat( - updatedEventIngestedMillisRange.getMax(), - lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-29T00:00:00Z"))) - ); + assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-28T00:00:00Z")))); + assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-29T00:00:00Z")))); // Stop the node holding the searchable snapshots, and since we defined // the index allocation criteria to require the searchable snapshot @@ -955,24 +724,17 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo } } - private void createIndexWithTimestampAndEventIngested(String indexName, int numShards, Settings extraSettings) throws IOException { + private void createIndexWithTimestamp(String indexName, int numShards, Settings extraSettings) throws IOException { assertAcked( indicesAdmin().prepareCreate(indexName) .setMapping( XContentFactory.jsonBuilder() .startObject() .startObject("properties") - .startObject(DataStream.TIMESTAMP_FIELD_NAME) .field("type", randomFrom("date", "date_nanos")) .field("format", "strict_date_optional_time_nanos") .endObject() - - .startObject(IndexMetadata.EVENT_INGESTED_FIELD_NAME) - .field("type", randomFrom("date", "date_nanos")) - .field("format", "strict_date_optional_time_nanos") - .endObject() - .endObject() .endObject() ) @@ -981,70 +743,12 @@ private void createIndexWithTimestampAndEventIngested(String indexName, int numS ensureGreen(indexName); } - private void createIndexWithOnlyOneTimestampField(String timestampField, String index, int numShards, Settings extraSettings) - throws IOException { - assertAcked( - indicesAdmin().prepareCreate(index) - .setMapping( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - - .startObject(timestampField) - .field("type", randomFrom("date", "date_nanos")) - .field("format", "strict_date_optional_time_nanos") - .endObject() - - .endObject() - .endObject() - ) - .setSettings(indexSettingsNoReplicas(numShards).put(INDEX_SOFT_DELETES_SETTING.getKey(), true).put(extraSettings)) - ); - ensureGreen(index); - } - - private void 
indexDocumentsWithOnlyOneTimestampField(String timestampField, String index, int docCount, String timestampTemplate) - throws Exception { - final List indexRequestBuilders = new ArrayList<>(); - for (int i = 0; i < docCount; i++) { - indexRequestBuilders.add( - prepareIndex(index).setSource( - timestampField, - String.format( - Locale.ROOT, - timestampTemplate, - between(0, 23), - between(0, 59), - between(0, 59), - randomLongBetween(0, 999999999L) - ) - ) - ); - } - indexRandom(true, false, indexRequestBuilders); - - assertThat(indicesAdmin().prepareForceMerge(index).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(), equalTo(0)); - refresh(index); - forceMerge(); - } - - private void indexDocumentsWithTimestampAndEventIngestedDates(String indexName, int docCount, String timestampTemplate) - throws Exception { - + private void indexDocumentsWithTimestampWithinDate(String indexName, int docCount, String timestampTemplate) throws Exception { final List indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < docCount; i++) { indexRequestBuilders.add( prepareIndex(indexName).setSource( DataStream.TIMESTAMP_FIELD_NAME, - String.format( - Locale.ROOT, - timestampTemplate, - between(0, 23), - between(0, 59), - between(0, 59), - randomLongBetween(0, 999999999L) - ), - IndexMetadata.EVENT_INGESTED_FIELD_NAME, String.format( Locale.ROOT, timestampTemplate, @@ -1085,39 +789,4 @@ private void waitUntilRecoveryIsDone(String index) throws Exception { private void waitUntilAllShardsAreUnassigned(Index index) throws Exception { awaitClusterState(state -> state.getRoutingTable().index(index).allPrimaryShardsUnassigned()); } - - record SearchShardAPIResult(List skipped, List notSkipped) {} - - private static SearchShardAPIResult doSearchShardAPIQuery( - List indicesToSearch, - RangeQueryBuilder rangeQuery, - boolean allowPartialSearchResults, - int expectedTotalShards - ) { - SearchShardsRequest searchShardsRequest = new SearchShardsRequest( - indicesToSearch.toArray(new String[0]), - SearchRequest.DEFAULT_INDICES_OPTIONS, - rangeQuery, - null, - null, - allowPartialSearchResults, - null - ); - - SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); - assertThat(searchShardsResponse.getGroups().size(), equalTo(expectedTotalShards)); - List> partitionedBySkipped = searchShardsResponse.getGroups() - .stream() - .collect( - Collectors.teeing( - Collectors.filtering(g -> g.skipped(), Collectors.toList()), - Collectors.filtering(g -> g.skipped() == false, Collectors.toList()), - List::of - ) - ); - - List skipped = partitionedBySkipped.get(0); - List notSkipped = partitionedBySkipped.get(1); - return new SearchShardAPIResult(skipped, notSkipped); - } } From d07becadaff0b94061931f8bfbd1a0b4a8870eb0 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 15 Jul 2024 11:42:23 -0700 Subject: [PATCH 061/406] Add runtime classpath as input to ThirdPartyAuditTask (#110882) (#110889) The runtime classpath of `ThirdPartyAuditTask` was not being tracked as an input which can cause issues with task output caching and build avoidance. 
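A minimal sketch of the Gradle pattern involved is shown below. It is illustrative only: the task and property names are hypothetical and not the real ThirdPartyAuditTask API. It shows a classpath exposed through a getter annotated with @Classpath so that Gradle hashes its contents into the task's up-to-date checks and build-cache key.

```java
import org.gradle.api.DefaultTask;
import org.gradle.api.file.FileCollection;
import org.gradle.api.tasks.CacheableTask;
import org.gradle.api.tasks.Classpath;
import org.gradle.api.tasks.TaskAction;

// Hypothetical example task, not the actual ThirdPartyAuditTask.
@CacheableTask
public class ExampleAuditTask extends DefaultTask {

    private FileCollection classpath;

    // @Classpath marks this FileCollection as a classpath input: Gradle hashes its
    // contents (normalizing irrelevant details such as jar timestamps) and folds the
    // result into up-to-date checks and the build cache key, so classpath changes
    // re-run the task and invalidate cached output.
    @Classpath
    public FileCollection getClasspath() {
        return classpath;
    }

    public void setClasspath(FileCollection classpath) {
        this.classpath = classpath;
    }

    @TaskAction
    public void run() {
        // ... perform the audit against the tracked classpath ...
    }
}
```

Without an input annotation like this, changes to the jars on the runtime classpath would not cause the task to re-run or change its cache key, which is the behaviour being fixed here.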
--- .../gradle/internal/precommit/ThirdPartyAuditTask.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java index 4263ef2b1f76f..489cff65976b1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java @@ -193,6 +193,11 @@ public Set getMissingClassExcludes() { @SkipWhenEmpty public abstract ConfigurableFileCollection getJarsToScan(); + @Classpath + public FileCollection getClasspath() { + return classpath; + } + @TaskAction public void runThirdPartyAudit() throws IOException { Set jars = getJarsToScan().getFiles(); From 07c35b186ab504dfbdf9d63301098e5859e74d93 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 15 Jul 2024 15:57:40 -0700 Subject: [PATCH 062/406] Fix native preallocate to actually run (#110851) Native preallocation has several issues, introduced in a refactoring for 8.13. First, the native allocator is never even tried, it always decides to fall back to the Java setLength method. Second, the stat method did not work correctly on all systems, see #110807. This commit fixes native preallocate to properly execute on Linux, as well as MacOS. It also adds direct tests of preallocation. Note that this is meant as a bugfix for 8.15, so as minimal a change as possible is made here. The code has completely changed in main. Some things like the new test and fixes for macos will be forward ported to main, but I did not want to make larger changes in a bugfix. --- docs/changelog/110851.yaml | 5 + libs/preallocate/build.gradle | 5 + .../src/main/java/module-info.java | 2 +- .../AbstractPosixPreallocator.java | 40 +++- .../preallocate/MacOsPreallocator.java | 88 +++++--- .../preallocate/Preallocate.java | 20 +- .../preallocate/PreallocateTests.java | 30 +++ server/build.gradle | 3 +- server/src/main/java/module-info.java | 1 + .../bootstrap/Elasticsearch.java | 6 +- .../common/filesystem/FileSystemNatives.java | 4 +- .../filesystem/LinuxFileSystemNatives.java | 197 ------------------ .../filesystem/PosixFileSystemNatives.java | 137 ++++++++++++ .../bootstrap/BootstrapForTesting.java | 6 +- 14 files changed, 299 insertions(+), 245 deletions(-) create mode 100644 docs/changelog/110851.yaml create mode 100644 libs/preallocate/src/test/java/org/elasticsearch/preallocate/PreallocateTests.java delete mode 100644 server/src/main/java/org/elasticsearch/common/filesystem/LinuxFileSystemNatives.java create mode 100644 server/src/main/java/org/elasticsearch/common/filesystem/PosixFileSystemNatives.java diff --git a/docs/changelog/110851.yaml b/docs/changelog/110851.yaml new file mode 100644 index 0000000000000..bea774e89dee0 --- /dev/null +++ b/docs/changelog/110851.yaml @@ -0,0 +1,5 @@ +pr: 110851 +summary: Fix native preallocate to actually run +area: Infra/Core +type: bug +issues: [] diff --git a/libs/preallocate/build.gradle b/libs/preallocate/build.gradle index a490c7168516e..2bc802daee1d2 100644 --- a/libs/preallocate/build.gradle +++ b/libs/preallocate/build.gradle @@ -11,6 +11,11 @@ dependencies { implementation project(':libs:elasticsearch-core') implementation project(':libs:elasticsearch-logging') implementation "net.java.dev.jna:jna:${versions.jna}" + + testImplementation "junit:junit:${versions.junit}" + 
testImplementation(project(":test:framework")) { + exclude group: 'org.elasticsearch', module: 'elasticsearch-preallocate' + } } tasks.named('forbiddenApisMain').configure { diff --git a/libs/preallocate/src/main/java/module-info.java b/libs/preallocate/src/main/java/module-info.java index 89c85d95ab2f0..4e980b083701a 100644 --- a/libs/preallocate/src/main/java/module-info.java +++ b/libs/preallocate/src/main/java/module-info.java @@ -11,7 +11,7 @@ requires org.elasticsearch.logging; requires com.sun.jna; - exports org.elasticsearch.preallocate to org.elasticsearch.blobcache, com.sun.jna; + exports org.elasticsearch.preallocate to org.elasticsearch.blobcache, com.sun.jna, org.elasticsearch.server; provides org.elasticsearch.jdk.ModuleQualifiedExportsService with org.elasticsearch.preallocate.PreallocateModuleExportsService; } diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/AbstractPosixPreallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/AbstractPosixPreallocator.java index e841b38c0059e..e748ddc391176 100644 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/AbstractPosixPreallocator.java +++ b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/AbstractPosixPreallocator.java @@ -8,21 +8,27 @@ package org.elasticsearch.preallocate; -import com.sun.jna.FunctionMapper; import com.sun.jna.Library; import com.sun.jna.Native; +import com.sun.jna.NativeLibrary; import com.sun.jna.NativeLong; import com.sun.jna.Platform; import com.sun.jna.Structure; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; + import java.io.IOException; import java.security.AccessController; import java.security.PrivilegedAction; +import java.util.Arrays; +import java.util.List; import java.util.Locale; -import java.util.Map; abstract class AbstractPosixPreallocator implements Preallocator { + static final Logger logger = LogManager.getLogger(AbstractPosixPreallocator.class); + /** * Constants relating to posix libc. 
* @@ -35,7 +41,7 @@ protected record PosixConstants(int SIZEOF_STAT, int STAT_ST_SIZE_OFFSET, int O_ private static final int O_WRONLY = 1; - static final class Stat64 extends Structure implements Structure.ByReference { + public static final class Stat64 extends Structure implements Structure.ByReference { public byte[] _ignore1; public NativeLong st_size = new NativeLong(0); public byte[] _ignore2; @@ -44,6 +50,11 @@ static final class Stat64 extends Structure implements Structure.ByReference { this._ignore1 = new byte[stSizeOffset]; this._ignore2 = new byte[sizeof - stSizeOffset - 8]; } + + @Override + protected List getFieldOrder() { + return Arrays.asList("_ignore1", "st_size", "_ignore2"); + } } private interface NativeFunctions extends Library { @@ -58,6 +69,10 @@ private interface FStat64Function extends Library { int fstat64(int fd, Stat64 stat); } + private interface FXStatFunction extends Library { + int __fxstat(int version, int fd, Stat64 stat); + } + public static final boolean NATIVES_AVAILABLE; private static final NativeFunctions functions; private static final FStat64Function fstat64; @@ -67,18 +82,29 @@ private interface FStat64Function extends Library { try { return Native.load(Platform.C_LIBRARY_NAME, NativeFunctions.class); } catch (final UnsatisfiedLinkError e) { + logger.warn("Failed to load posix functions for preallocate"); return null; } }); fstat64 = AccessController.doPrivileged((PrivilegedAction) () -> { try { + // JNA lazily finds symbols, so even though we try to bind two different functions below, if fstat64 + // isn't found, we won't know until runtime when calling the function. To force resolution of the + // symbol we get a function object directly from the native library. We don't use it, we just want to + // see if it will throw UnsatisfiedLinkError + NativeLibrary.getInstance(Platform.C_LIBRARY_NAME).getFunction("fstat64"); return Native.load(Platform.C_LIBRARY_NAME, FStat64Function.class); } catch (final UnsatisfiedLinkError e) { + // fstat has a long history in linux from the 32-bit architecture days. On some modern linux systems, + // fstat64 doesn't exist as a symbol in glibc. Instead, the compiler replaces fstat64 calls with + // the internal __fxstat method. Here we fall back to __fxstat, and statically bind the special + // "version" argument so that the call site looks the same as that of fstat64 try { - // on Linux fstat64 isn't available as a symbol, but instead uses a special __ name - var options = Map.of(Library.OPTION_FUNCTION_MAPPER, (FunctionMapper) (lib, method) -> "__fxstat64"); - return Native.load(Platform.C_LIBRARY_NAME, FStat64Function.class, options); + var fxstat = Native.load(Platform.C_LIBRARY_NAME, FXStatFunction.class); + int version = System.getProperty("os.arch").equals("aarch64") ? 
0 : 1; + return (fd, stat) -> fxstat.__fxstat(version, fd, stat); } catch (UnsatisfiedLinkError e2) { + logger.warn("Failed to load __fxstat for preallocate"); return null; } } @@ -124,7 +150,7 @@ public void close() throws IOException { @Override public boolean useNative() { - return false; + return NATIVES_AVAILABLE; } @Override diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java index 149cf80527bd0..f80d6cbafd5cd 100644 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java +++ b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java @@ -7,18 +7,27 @@ */ package org.elasticsearch.preallocate; +import com.sun.jna.Library; +import com.sun.jna.Memory; import com.sun.jna.Native; import com.sun.jna.NativeLong; import com.sun.jna.Platform; -import com.sun.jna.Structure; +import java.lang.invoke.MethodHandles; import java.security.AccessController; import java.security.PrivilegedAction; -import java.util.Arrays; -import java.util.List; final class MacOsPreallocator extends AbstractPosixPreallocator { + static { + try { + MethodHandles.lookup().ensureInitialized(Natives.class); + logger.info("Initialized macos natives: " + Natives.NATIVES_AVAILABLE); + } catch (IllegalAccessException unexpected) { + throw new AssertionError(unexpected); + } + } + MacOsPreallocator() { super(new PosixConstants(144, 96, 512)); } @@ -31,21 +40,25 @@ public boolean useNative() { @Override public int preallocate(final int fd, final long currentSize /* unused */ , final long fileSize) { // the Structure.ByReference constructor requires access to declared members - final Natives.Fcntl.FStore fst = AccessController.doPrivileged((PrivilegedAction) Natives.Fcntl.FStore::new); - fst.fst_flags = Natives.Fcntl.F_ALLOCATECONTIG; - fst.fst_posmode = Natives.Fcntl.F_PEOFPOSMODE; - fst.fst_offset = new NativeLong(0); - fst.fst_length = new NativeLong(fileSize); + final Natives.Fcntl.FStore fst = new Natives.Fcntl.FStore(); + fst.setFlags(Natives.Fcntl.F_ALLOCATECONTIG); + fst.setPosmode(Natives.Fcntl.F_PEOFPOSMODE); + fst.setOffset(0); + fst.setLength(fileSize); // first, try allocating contiguously - if (Natives.fcntl(fd, Natives.Fcntl.F_PREALLOCATE, fst) != 0) { + logger.info("Calling fcntl for preallocate"); + if (Natives.functions.fcntl(fd, Natives.Fcntl.F_PREALLOCATE, fst.memory) != 0) { + logger.warn("Failed to get contiguous preallocate, trying non-contiguous"); // that failed, so let us try allocating non-contiguously - fst.fst_flags = Natives.Fcntl.F_ALLOCATEALL; - if (Natives.fcntl(fd, Natives.Fcntl.F_PREALLOCATE, fst) != 0) { + fst.setFlags(Natives.Fcntl.F_ALLOCATEALL); + if (Natives.functions.fcntl(fd, Natives.Fcntl.F_PREALLOCATE, fst.memory) != 0) { + logger.warn("Failed to get non-continugous preallocate"); // i'm afraid captain dale had to bail return Native.getLastError(); } } - if (Natives.ftruncate(fd, new NativeLong(fileSize)) != 0) { + if (Natives.functions.ftruncate(fd, new NativeLong(fileSize)) != 0) { + logger.warn("Failed to ftruncate"); return Native.getLastError(); } return 0; @@ -53,17 +66,20 @@ public int preallocate(final int fd, final long currentSize /* unused */ , final private static class Natives { - static boolean NATIVES_AVAILABLE; + static final boolean NATIVES_AVAILABLE; + static final NativeFunctions functions; static { - NATIVES_AVAILABLE = AccessController.doPrivileged((PrivilegedAction) () -> { 
+ NativeFunctions nativeFunctions = AccessController.doPrivileged((PrivilegedAction) () -> { try { - Native.register(Natives.class, Platform.C_LIBRARY_NAME); + return Native.load(Platform.C_LIBRARY_NAME, NativeFunctions.class); } catch (final UnsatisfiedLinkError e) { - return false; + logger.warn("Failed to load macos native preallocate functions"); + return null; } - return true; }); + functions = nativeFunctions; + NATIVES_AVAILABLE = nativeFunctions != null; } static class Fcntl { @@ -79,25 +95,37 @@ static class Fcntl { @SuppressWarnings("unused") private static final int F_VOLPOSMODE = 4; // allocate from the volume offset - public static final class FStore extends Structure implements Structure.ByReference { - public int fst_flags = 0; - public int fst_posmode = 0; - public NativeLong fst_offset = new NativeLong(0); - public NativeLong fst_length = new NativeLong(0); - @SuppressWarnings("unused") - public NativeLong fst_bytesalloc = new NativeLong(0); - - @Override - protected List getFieldOrder() { - return Arrays.asList("fst_flags", "fst_posmode", "fst_offset", "fst_length", "fst_bytesalloc"); + public static final class FStore { + final Memory memory = new Memory(32); + + public void setFlags(int flags) { + memory.setInt(0, flags); + } + + public void setPosmode(int posmode) { + memory.setInt(4, posmode); + } + + public void setOffset(long offset) { + memory.setLong(8, offset); + } + + public void setLength(long length) { + memory.setLong(16, length); + } + + public void getBytesalloc() { + memory.getLong(24); } } } - static native int fcntl(int fd, int cmd, Fcntl.FStore fst); + private interface NativeFunctions extends Library { + int fcntl(int fd, int cmd, Object... args); - static native int ftruncate(int fd, NativeLong length); + int ftruncate(int fd, NativeLong length); + } } } diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java index 8f7214e0877ba..d4b65c95719bc 100644 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java +++ b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java @@ -15,6 +15,7 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.RandomAccessFile; +import java.lang.invoke.MethodHandles; import java.lang.reflect.Field; import java.nio.file.Files; import java.nio.file.Path; @@ -24,12 +25,27 @@ public class Preallocate { private static final Logger logger = LogManager.getLogger(Preallocate.class); - private static final boolean IS_LINUX; - private static final boolean IS_MACOS; + static final boolean IS_LINUX; + static final boolean IS_MACOS; static { String osName = System.getProperty("os.name"); IS_LINUX = osName.startsWith("Linux"); IS_MACOS = osName.startsWith("Mac OS X"); + + // make sure the allocator native methods are initialized + Class clazz = null; + if (IS_LINUX) { + clazz = LinuxPreallocator.class; + } else if (IS_MACOS) { + clazz = MacOsPreallocator.class; + } + if (clazz != null) { + try { + MethodHandles.lookup().ensureInitialized(clazz); + } catch (IllegalAccessException unexpected) { + throw new AssertionError(unexpected); + } + } } public static void preallocate(final Path cacheFile, final long fileSize) throws IOException { diff --git a/libs/preallocate/src/test/java/org/elasticsearch/preallocate/PreallocateTests.java b/libs/preallocate/src/test/java/org/elasticsearch/preallocate/PreallocateTests.java new file mode 100644 index 
0000000000000..e65327f9cd1d2 --- /dev/null +++ b/libs/preallocate/src/test/java/org/elasticsearch/preallocate/PreallocateTests.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.preallocate; + +import org.elasticsearch.common.filesystem.FileSystemNatives; +import org.elasticsearch.test.ESTestCase; + +import java.nio.file.Path; +import java.util.OptionalLong; + +import static org.hamcrest.Matchers.equalTo; + +public class PreallocateTests extends ESTestCase { + public void testPreallocate() throws Exception { + Path cacheFile = createTempFile(); + long size = 1024 * 1024; // 1 MB + Preallocate.preallocate(cacheFile, size); + OptionalLong foundSize = FileSystemNatives.allocatedSizeInBytes(cacheFile); + assertTrue(foundSize.isPresent()); + // Note that on Windows the fallback setLength is used. Although that creates a sparse + // file on Linux/MacOS, it full allocates the file on Windows + assertThat(foundSize.getAsLong(), equalTo(size)); + } +} diff --git a/server/build.gradle b/server/build.gradle index e62abed2bc75a..2628158c7d14a 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -40,8 +40,7 @@ dependencies { implementation project(":libs:elasticsearch-simdvec") implementation project(':libs:elasticsearch-plugin-classloader') - // no compile dependency by server, but server defines security policy for this codebase so it i> - runtimeOnly project(":libs:elasticsearch-preallocate") + implementation project(":libs:elasticsearch-preallocate") // lucene api "org.apache.lucene:lucene-core:${versions.lucene}" diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index aaf8b3d0c8d84..cd17a435388d3 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -33,6 +33,7 @@ requires org.elasticsearch.grok; requires org.elasticsearch.tdigest; requires org.elasticsearch.simdvec; + requires org.elasticsearch.preallocate; requires com.sun.jna; requires hppc; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 082e1dd9257e0..072e2eed42c20 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -40,6 +40,7 @@ import org.elasticsearch.nativeaccess.NativeAccess; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.preallocate.Preallocate; import java.io.IOException; import java.io.InputStream; @@ -195,7 +196,8 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { SubscribableListener.class, RunOnce.class, // We eagerly initialize to work around log4j permissions & JDK-8309727 - VectorUtil.class + VectorUtil.class, + Preallocate.class ); // install SM after natives, shutdown hooks, etc. @@ -209,7 +211,7 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { private static void ensureInitialized(Class... 
classes) { for (final var clazz : classes) { try { - MethodHandles.publicLookup().ensureInitialized(clazz); + MethodHandles.lookup().ensureInitialized(clazz); } catch (IllegalAccessException unexpected) { throw new AssertionError(unexpected); } diff --git a/server/src/main/java/org/elasticsearch/common/filesystem/FileSystemNatives.java b/server/src/main/java/org/elasticsearch/common/filesystem/FileSystemNatives.java index 00502d64b3896..3bcd76778e032 100644 --- a/server/src/main/java/org/elasticsearch/common/filesystem/FileSystemNatives.java +++ b/server/src/main/java/org/elasticsearch/common/filesystem/FileSystemNatives.java @@ -37,8 +37,8 @@ private static Provider loadJnaProvider() { Class.forName("com.sun.jna.Native"); if (Constants.WINDOWS) { return WindowsFileSystemNatives.getInstance(); - } else if (Constants.LINUX && Constants.JRE_IS_64BIT) { - return LinuxFileSystemNatives.getInstance(); + } else if (Constants.JRE_IS_64BIT) { + return PosixFileSystemNatives.getInstance(); } } catch (ClassNotFoundException e) { logger.warn("JNA not found. FileSystemNatives methods will be disabled.", e); diff --git a/server/src/main/java/org/elasticsearch/common/filesystem/LinuxFileSystemNatives.java b/server/src/main/java/org/elasticsearch/common/filesystem/LinuxFileSystemNatives.java deleted file mode 100644 index b40fb5c2e145b..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/filesystem/LinuxFileSystemNatives.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.common.filesystem; - -import com.sun.jna.LastErrorException; -import com.sun.jna.Native; -import com.sun.jna.Platform; -import com.sun.jna.Structure; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; - -import java.nio.file.Files; -import java.nio.file.Path; -import java.time.Instant; -import java.util.OptionalLong; - -import static org.elasticsearch.core.Strings.format; - -/** - * {@link FileSystemNatives.Provider} implementation for Linux x86-64bits - */ -final class LinuxFileSystemNatives implements FileSystemNatives.Provider { - - private static final Logger logger = LogManager.getLogger(LinuxFileSystemNatives.class); - - private static final LinuxFileSystemNatives INSTANCE = new LinuxFileSystemNatives(); - - /** st_blocks field indicates the number of blocks allocated to the file, 512-byte units **/ - private static final long ST_BLOCKS_UNIT = 512L; - - /** - * Version of the `struct stat' data structure. - * - * To allow the `struct stat' structure bits to vary without changing shared library major version number, the `stat' function is often - * an inline wrapper around `xstat' which takes a leading version-number argument designating the data structure and bits used. - * - * In glibc this version is defined in bits/stat.h (or bits/struct_stat.h in glibc 2.33, or bits/xstatver.h in more recent versions). 
- * - * For x86-64 the _STAT_VER used is: - * # define _STAT_VER_LINUX 1 - * # define _STAT_VER _STAT_VER_LINUX - * - * For other architectures the _STAT_VER used is: - * # define _STAT_VER_LINUX 0 - * # define _STAT_VER _STAT_VER_LINUX - **/ - private static int loadStatVersion() { - return "aarch64".equalsIgnoreCase(Constants.OS_ARCH) ? 0 : 1; - } - - private static final int STAT_VER = loadStatVersion(); - - private LinuxFileSystemNatives() { - assert Constants.LINUX : Constants.OS_NAME; - assert Constants.JRE_IS_64BIT : Constants.OS_ARCH; - try { - Native.register(XStatLibrary.class, Platform.C_LIBRARY_NAME); - logger.debug("C library loaded"); - } catch (LinkageError e) { - logger.warn("unable to link C library. native methods and handlers will be disabled.", e); - throw e; - } - } - - static LinuxFileSystemNatives getInstance() { - return INSTANCE; - } - - public static class XStatLibrary { - public static native int __xstat(int version, String path, Stat stats) throws LastErrorException; - } - - /** - * Retrieves the actual number of bytes of disk storage used to store a specified file. - * - * @param path the path to the file - * @return an {@link OptionalLong} that contains the number of allocated bytes on disk for the file, or empty if the size is invalid - */ - @Override - public OptionalLong allocatedSizeInBytes(Path path) { - assert Files.isRegularFile(path) : path; - try { - final Stat stats = new Stat(); - final int rc = XStatLibrary.__xstat(STAT_VER, path.toString(), stats); - if (logger.isTraceEnabled()) { - logger.trace("executing native method __xstat() returned {} with error code [{}] for file [{}]", stats, rc, path); - } - return OptionalLong.of(stats.st_blocks * ST_BLOCKS_UNIT); - } catch (LastErrorException e) { - logger.warn( - () -> format( - "error when executing native method __xstat(int vers, const char *name, struct stat *buf) for file [%s]", - path - ), - e - ); - } - return OptionalLong.empty(); - } - - @Structure.FieldOrder( - { - "st_dev", - "st_ino", - "st_nlink", - "st_mode", - "st_uid", - "st_gid", - "__pad0", - "st_rdev", - "st_size", - "st_blksize", - "st_blocks", - "st_atim", - "st_mtim", - "st_ctim", - "__glibc_reserved0", - "__glibc_reserved1", - "__glibc_reserved2" } - ) - public static class Stat extends Structure { - - /** - * The stat structure varies across architectures in the glibc and kernel source codes. For example some fields might be ordered - * differently and/or some padding bytes might be present between some fields. - * - * The struct implemented here refers to the Linux x86 architecture in the glibc source files: - * - glibc version 2.23: sysdeps/unix/sysv/linux/x86/bits/stat.h - * - glibc version 2.33: sysdeps/unix/sysv/linux/x86/bits/struct_stat.h - * - * The following command is useful to compile the stat struct on a given system: - * echo "#include <sys/stat.h>" | gcc -xc - -E -dD | grep -ve '^$' | grep -A23 '^struct stat' - */ - public long st_dev; // __dev_t st_dev; /* Device. */ - public long st_ino; // __ino_t st_ino; /* File serial number. */ - public long st_nlink; // __nlink_t st_nlink; /* Link count. */ - public int st_mode; // __mode_t st_mode; /* File mode. */ - public int st_uid; // __uid_t st_uid; /* User ID of the file's owner. */ - public int st_gid; // __gid_t st_gid; /* Group ID of the file's group. */ - public int __pad0; - public long st_rdev; // __dev_t st_rdev; /* Device number, if device. */ - public long st_size; // __off_t st_size; /* Size of file, in bytes. 
*/ - public long st_blksize; // __blksize_t st_blksize; /* Optimal block size for I/O. */ - public long st_blocks; // __blkcnt_t st_blocks; /* Number 512-byte blocks allocated. */ - public Time st_atim; // struct timespec st_atim; /* Time of last access. */ - public Time st_mtim; // struct timespec st_mtim; /* Time of last modification. */ - public Time st_ctim; // struct timespec st_ctim; /* Time of last status change. */ - public long __glibc_reserved0; // __syscall_slong_t - public long __glibc_reserved1; // __syscall_slong_t - public long __glibc_reserved2; // __syscall_slong_t - - @Override - public String toString() { - return "[st_dev=" - + st_dev - + ", st_ino=" - + st_ino - + ", st_nlink=" - + st_nlink - + ", st_mode=" - + st_mode - + ", st_uid=" - + st_uid - + ", st_gid=" - + st_gid - + ", st_rdev=" - + st_rdev - + ", st_size=" - + st_size - + ", st_blksize=" - + st_blksize - + ", st_blocks=" - + st_blocks - + ", st_atim=" - + Instant.ofEpochSecond(st_atim.tv_sec, st_atim.tv_nsec) - + ", st_mtim=" - + Instant.ofEpochSecond(st_mtim.tv_sec, st_mtim.tv_nsec) - + ", st_ctim=" - + Instant.ofEpochSecond(st_ctim.tv_sec, st_ctim.tv_nsec) - + ']'; - } - } - - @Structure.FieldOrder({ "tv_sec", "tv_nsec" }) - public static class Time extends Structure { - public long tv_sec; - public long tv_nsec; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/filesystem/PosixFileSystemNatives.java b/server/src/main/java/org/elasticsearch/common/filesystem/PosixFileSystemNatives.java new file mode 100644 index 0000000000000..0ebe31ed32c83 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/filesystem/PosixFileSystemNatives.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.filesystem; + +import com.sun.jna.LastErrorException; +import com.sun.jna.Library; +import com.sun.jna.Memory; +import com.sun.jna.Native; +import com.sun.jna.NativeLibrary; +import com.sun.jna.Platform; +import com.sun.jna.Pointer; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.OptionalLong; + +import static org.elasticsearch.core.Strings.format; + +/** + * {@link FileSystemNatives.Provider} implementation for Linux x86-64bits + */ +final class PosixFileSystemNatives implements FileSystemNatives.Provider { + + private static final Logger logger = LogManager.getLogger(PosixFileSystemNatives.class); + + private static final PosixFileSystemNatives INSTANCE = new PosixFileSystemNatives(); + + /** st_blocks field indicates the number of blocks allocated to the file, 512-byte units **/ + private static final long ST_BLOCKS_UNIT = 512L; + + /** + * Version of the `struct stat' data structure. + * + * To allow the `struct stat' structure bits to vary without changing shared library major version number, the `stat' function is often + * an inline wrapper around `xstat' which takes a leading version-number argument designating the data structure and bits used. + * + * In glibc this version is defined in bits/stat.h (or bits/struct_stat.h in glibc 2.33, or bits/xstatver.h in more recent versions). 
+ * + * For x86-64 the _STAT_VER used is: + * # define _STAT_VER_LINUX 1 + * # define _STAT_VER _STAT_VER_LINUX + * + * For other architectures the _STAT_VER used is: + * # define _STAT_VER_LINUX 0 + * # define _STAT_VER _STAT_VER_LINUX + **/ + private static int loadStatVersion() { + return "aarch64".equalsIgnoreCase(Constants.OS_ARCH) ? 0 : 1; + } + + private final Stat64Library lib; + + private PosixFileSystemNatives() { + assert Constants.JRE_IS_64BIT : Constants.OS_ARCH; + Stat64Library statFunction; + try { + var libc = NativeLibrary.getInstance(Platform.C_LIBRARY_NAME); + libc.getFunction("stat64"); + // getfunction didn't fail, so the symbol is available + statFunction = Native.load(Platform.C_LIBRARY_NAME, Stat64Library.class); + } catch (UnsatisfiedLinkError e) { + var xstat = Native.load(Platform.C_LIBRARY_NAME, XStatLibrary.class); + var version = loadStatVersion(); + statFunction = (path, stats) -> xstat.__xstat(version, path, stats); + } + lib = statFunction; + logger.debug("C library loaded"); + } + + static PosixFileSystemNatives getInstance() { + return INSTANCE; + } + + public interface Stat64Library extends Library { + int stat64(String path, Pointer stats) throws LastErrorException; + } + + public interface XStatLibrary extends Library { + int __xstat(int version, String path, Pointer stats) throws LastErrorException; + } + + /** + * Retrieves the actual number of bytes of disk storage used to store a specified file. + * + * @param path the path to the file + * @return an {@link OptionalLong} that contains the number of allocated bytes on disk for the file, or empty if the size is invalid + */ + @Override + public OptionalLong allocatedSizeInBytes(Path path) { + assert Files.isRegularFile(path) : path; + try { + final Stat stats = new Stat(Constants.LINUX ? 
64 : 104); + final int rc = lib.stat64(path.toString(), stats.memory); + if (logger.isTraceEnabled()) { + logger.trace("executing native method __xstat() returned {} with error code [{}] for file [{}]", stats, rc, path); + } + return OptionalLong.of(stats.getBlocks() * ST_BLOCKS_UNIT); + } catch (LastErrorException e) { + logger.warn( + () -> format( + "error when executing native method __xstat(int vers, const char *name, struct stat *buf) for file [%s]", + path + ), + e + ); + } + return OptionalLong.empty(); + } + + public static class Stat { + final Memory memory = new Memory(144); + final int blocksOffset; + + Stat(int blocksOffset) { + this.blocksOffset = blocksOffset; + } + + public long getBlocks() { + return memory.getLong(blocksOffset); + } + + @Override + public String toString() { + return "Stat [blocks=" + getBlocks() + "]"; + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 8ef80c08517de..b6abd3f7fab5a 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -24,6 +24,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.jdk.JarHell; import org.elasticsearch.plugins.PluginDescriptor; +import org.elasticsearch.preallocate.Preallocate; import org.elasticsearch.secure_sm.SecureSM; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.PrivilegedOperations; @@ -111,9 +112,10 @@ public class BootstrapForTesting { // init mockito SecureMockMaker.init(); - // init the privileged operation try { - MethodHandles.publicLookup().ensureInitialized(PrivilegedOperations.class); + var lookup = MethodHandles.publicLookup(); + lookup.ensureInitialized(PrivilegedOperations.class); + lookup.ensureInitialized(Preallocate.class); } catch (IllegalAccessException unexpected) { throw new AssertionError(unexpected); } From e946fa80862d130db618627512ddbe64ac96cac1 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Tue, 16 Jul 2024 10:50:45 +0200 Subject: [PATCH 063/406] Backport bugfixes for union-types to 8.15 (#110872) * Fix bug in union-types with type-casting in grouping key of STATS (#110476) * Allow auto-generated type-cast fields in CsvTests This allows, for example, a csv-spec test result header like `client_ip::ip:ip`, which is generated with a command like `STATS count=count(*) BY client_ip::ip` It is also a small cleanup of the header parsing code, since it was using Strings.split() in an odd way. * Fix bug in union-types with type-casting in grouping key of STATS * Update docs/changelog/110476.yaml * Added casting_operator required capability Using the new `::` syntax requires disabling support for older versions in multi-cluster tests. * Added more tests for inline stats over long/datetime * Trying to fix the STATS...STATS bug This makes two changes: * Keeps the Alias in the aggs.aggregates from the grouping key, so that ReplaceStatsNestedExpressionWithEval still works * Adds explicit support for union-types conversion at grouping key loading in the ordinalGroupingOperatorFactory Neither fix the particular edge case, but do seem correct * Added EsqlCapability for this change So that mixed cluster tests don't fail these new queries. * Fix InsertFieldExtract for union types Union types require a FieldExtractExec to be performed first thing at the bottom of local physical plans. 
In queries like ``` from testidx* | eval x = to_string(client_ip) | stats c = count(*) by x | keep c ``` The `stats` has the grouping `x` but the aggregates get pruned to just `c`. In cases like this, we did not insert a FieldExtractExec, which this fixes. * Revert query that previously failed With Alex's fix, this query now passes. * Revert integration of union-types to ordinals aggregator This is because we have not found a test case that actually demonstrates this is necessary. * More tests that would fail without the latest fix * Correct code style * Fix failing case when aggregating on union-type with invalid grouping key * Capabilities restrictions on the new YML tests * Update docs/changelog/110476.yaml --------- Co-authored-by: Alexander Spies * An alternative approach to supporting union-types on stats grouping field (#110600) * Added union-types field extration to ordinals aggregation * Revert previous approach to getting union-types working in aggregations Where the grouping field is erased by later commands, like a subsequent stats. Instead we include union-type supports in the ordinals aggregation and mark the block loader as not supporting ordinals. * Fix union-types when aggregating on inline conversion function (#110652) A query like: ``` FROM sample_data, sample_data_str | STATS count=count(*) BY client_ip = TO_IP(client_ip) | SORT count DESC, client_ip ASC | KEEP count, client_ip ``` Failed due to unresolved aggregates from the union-type in the grouping key * Fix for union-types for multiple columns with the same name (#110793) * Make union types use unique attribute names * Cleanup leftover * Added failing test and final fix to EsRelation * Implement FieldAttribute.fieldName() * Fix tests * Refactor * Do not ignore union typed field's parent * Fix important typo D'oh * Mute unrelated (part of) test * Move capability to better location * Fix analyzer tests * multi-node tests with an earlier version of union-types (before this change) fail * Add capability to remaining failing tests * Remove variable * Add more complex test * Consolidate union type cleanup rules * Add 3 more required_capability's to make CI happy * Update caps for union type subfield yaml tests * Update docs/changelog/110793.yaml * Refined changelog text * Mute BWC for 8.15.0 for failing YAML tests * union_types_remove_fields for all 160_union_types tests The tests fail spordically, so safer to mute the entire suite. 
--------- Co-authored-by: Alexander Spies --------- Co-authored-by: Alexander Spies --- docs/changelog/110476.yaml | 7 + docs/changelog/110793.yaml | 7 + .../esql/core/expression/FieldAttribute.java | 21 +- .../ValueSourceReaderTypeConversionTests.java | 7 +- .../xpack/esql/CsvTestUtils.java | 23 +- .../src/main/resources/union_types.csv-spec | 249 +++++++++++++++++- .../xpack/esql/action/EsqlCapabilities.java | 15 ++ .../xpack/esql/analysis/Analyzer.java | 129 +++++---- .../function/UnsupportedAttribute.java | 7 + .../optimizer/LocalLogicalPlanOptimizer.java | 6 +- .../optimizer/rules/SubstituteSurrogates.java | 3 +- .../xpack/esql/plan/logical/EsRelation.java | 7 +- .../planner/EsPhysicalOperationProviders.java | 15 +- .../optimizer/LogicalPlanOptimizerTests.java | 8 +- .../test/esql/160_union_types.yml | 122 ++++++++- .../test/esql/161_union_types_subfields.yml | 2 +- 16 files changed, 546 insertions(+), 82 deletions(-) create mode 100644 docs/changelog/110476.yaml create mode 100644 docs/changelog/110793.yaml diff --git a/docs/changelog/110476.yaml b/docs/changelog/110476.yaml new file mode 100644 index 0000000000000..bc12b3711a366 --- /dev/null +++ b/docs/changelog/110476.yaml @@ -0,0 +1,7 @@ +pr: 110476 +summary: Fix bug in union-types with type-casting in grouping key of STATS +area: ES|QL +type: bug +issues: + - 109922 + - 110477 diff --git a/docs/changelog/110793.yaml b/docs/changelog/110793.yaml new file mode 100644 index 0000000000000..8f1f3ba9afeb7 --- /dev/null +++ b/docs/changelog/110793.yaml @@ -0,0 +1,7 @@ +pr: 110793 +summary: Fix for union-types for multiple columns with the same name +area: ES|QL +type: bug +issues: + - 110490 + - 109916 diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java index 0f7d92564c8ab..a3bc7ea621d8a 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java @@ -29,6 +29,10 @@ * - nestedParent - if nested, what's the parent (which might not be the immediate one) */ public class FieldAttribute extends TypedAttribute { + // TODO: This constant should not be used if possible; use .synthetic() + // https://github.com/elastic/elasticsearch/issues/105821 + public static final String SYNTHETIC_ATTRIBUTE_NAME_PREFIX = "$$"; + static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Attribute.class, "FieldAttribute", @@ -72,12 +76,11 @@ public FieldAttribute( boolean synthetic ) { super(source, name, type, qualifier, nullability, id, synthetic); - this.path = parent != null ? parent.name() : StringUtils.EMPTY; + this.path = parent != null ? parent.fieldName() : StringUtils.EMPTY; this.parent = parent; this.field = field; } - @SuppressWarnings("unchecked") public FieldAttribute(StreamInput in) throws IOException { /* * The funny casting dance with `(StreamInput & PlanStreamInput) in` is required @@ -131,6 +134,20 @@ public String path() { return path; } + /** + * The full name of the field in the index, including all parent fields. E.g. {@code parent.subfield.this_field}. + */ + public String fieldName() { + // Before 8.15, the field name was the same as the attribute's name. + // On later versions, the attribute can be renamed when creating synthetic attributes. 
+ // TODO: We should use synthetic() to check for that case. + // https://github.com/elastic/elasticsearch/issues/105821 + if (name().startsWith(SYNTHETIC_ATTRIBUTE_NAME_PREFIX) == false) { + return name(); + } + return Strings.hasText(path) ? path + "." + field.getName() : field.getName(); + } + public String qualifiedPath() { // return only the qualifier is there's no path return qualifier() != null ? qualifier() + (Strings.hasText(path) ? "." + path : StringUtils.EMPTY) : path; diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java index 66bcf2a57e393..09f63e9fa45bb 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java @@ -1687,12 +1687,13 @@ public StoredFieldsSpec rowStrideStoredFieldSpec() { @Override public boolean supportsOrdinals() { - return delegate.supportsOrdinals(); + // Fields with mismatching types cannot use ordinals for uniqueness determination, but must convert the values first + return false; } @Override - public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { - return delegate.ordinals(context); + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new IllegalArgumentException("Ordinals are not supported for type conversion"); } @Override diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index d88d7f9b9448f..3b3e12978ae04 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -10,7 +10,6 @@ import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatters; @@ -332,15 +331,15 @@ public static ExpectedResults loadCsvSpecValues(String csv) { columnTypes = new ArrayList<>(header.length); for (String c : header) { - String[] nameWithType = Strings.split(c, ":"); - if (nameWithType == null || nameWithType.length != 2) { + String[] nameWithType = escapeTypecast(c).split(":"); + if (nameWithType.length != 2) { throw new IllegalArgumentException("Invalid CSV header " + c); } - String typeName = nameWithType[1].trim(); - if (typeName.length() == 0) { - throw new IllegalArgumentException("A type is always expected in the csv file; found " + nameWithType); + String typeName = unescapeTypecast(nameWithType[1]).trim(); + if (typeName.isEmpty()) { + throw new IllegalArgumentException("A type is always expected in the csv file; found " + Arrays.toString(nameWithType)); } - String name = nameWithType[0].trim(); + String name = unescapeTypecast(nameWithType[0]).trim(); columnNames.add(name); Type type = Type.asType(typeName); if (type == null) { @@ -398,6 +397,16 @@ public static ExpectedResults loadCsvSpecValues(String csv) { } } + private static final String TYPECAST_SPACER = 
"__TYPECAST__"; + + private static String escapeTypecast(String typecast) { + return typecast.replace("::", TYPECAST_SPACER); + } + + private static String unescapeTypecast(String typecast) { + return typecast.replace(TYPECAST_SPACER, "::"); + } + public enum Type { INTEGER(Integer::parseInt, Integer.class), LONG(Long::parseLong, Long.class), diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index ee8c4be385e0f..eaf27dca83b3e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -45,8 +45,10 @@ FROM sample_data_ts_long ; singleIndexIpStats +required_capability: casting_operator + FROM sample_data -| EVAL client_ip = TO_IP(client_ip) +| EVAL client_ip = client_ip::ip | STATS count=count(*) BY client_ip | SORT count DESC, client_ip ASC | KEEP count, client_ip @@ -60,8 +62,10 @@ count:long | client_ip:ip ; singleIndexIpStringStats +required_capability: casting_operator + FROM sample_data_str -| EVAL client_ip = TO_IP(client_ip) +| EVAL client_ip = client_ip::ip | STATS count=count(*) BY client_ip | SORT count DESC, client_ip ASC | KEEP count, client_ip @@ -74,12 +78,29 @@ count:long | client_ip:ip 1 | 172.21.2.162 ; +singleIndexIpStringStatsInline +required_capability: casting_operator + +FROM sample_data_str +| STATS count=count(*) BY client_ip::ip +| STATS mc=count(count) BY count +| SORT mc DESC, count ASC +| KEEP mc, count +; + +mc:l | count:l +3 | 1 +1 | 4 +; + multiIndexIpString required_capability: union_types required_capability: metadata_fields +required_capability: casting_operator +required_capability: union_types_remove_fields FROM sample_data, sample_data_str METADATA _index -| EVAL client_ip = TO_IP(client_ip) +| EVAL client_ip = client_ip::ip | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC ; @@ -104,9 +125,11 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 multiIndexIpStringRename required_capability: union_types required_capability: metadata_fields +required_capability: casting_operator +required_capability: union_types_remove_fields FROM sample_data, sample_data_str METADATA _index -| EVAL host_ip = TO_IP(client_ip) +| EVAL host_ip = client_ip::ip | KEEP _index, @timestamp, host_ip, event_duration, message | SORT _index ASC, @timestamp DESC ; @@ -131,6 +154,7 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 multiIndexIpStringRenameToString required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data, sample_data_str METADATA _index | EVAL host_ip = TO_STRING(TO_IP(client_ip)) @@ -158,6 +182,7 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 multiIndexWhereIpString required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data, sample_data_str METADATA _index | WHERE STARTS_WITH(TO_STRING(client_ip), "172.21.2") @@ -175,6 +200,7 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 3450233 | Connected multiIndexWhereIpStringLike required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data, sample_data_str METADATA _index | WHERE TO_STRING(client_ip) LIKE "172.21.2.*" @@ -189,11 +215,42 @@ sample_data_str | 
2023-10-23T12:27:28.948Z | 2764889 | Connected sample_data_str | 2023-10-23T12:15:03.360Z | 3450233 | Connected to 10.1.0.3 ; +multiIndexSortIpString +required_capability: union_types +required_capability: casting_operator +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_str +| SORT client_ip::ip +| LIMIT 1 +; + +@timestamp:date | client_ip:null | event_duration:long | message:keyword +2023-10-23T13:33:34.937Z | null | 1232382 | Disconnected +; + +multiIndexSortIpStringEval +required_capability: union_types +required_capability: casting_operator +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_str +| SORT client_ip::ip, @timestamp ASC +| EVAL client_ip_as_ip = client_ip::ip +| LIMIT 1 +; + +@timestamp:date | client_ip:null | event_duration:long | message:keyword | client_ip_as_ip:ip +2023-10-23T13:33:34.937Z | null | 1232382 | Disconnected | 172.21.0.5 +; + multiIndexIpStringStats required_capability: union_types +required_capability: casting_operator +required_capability: union_types_remove_fields FROM sample_data, sample_data_str -| EVAL client_ip = TO_IP(client_ip) +| EVAL client_ip = client_ip::ip | STATS count=count(*) BY client_ip | SORT count DESC, client_ip ASC | KEEP count, client_ip @@ -208,9 +265,11 @@ count:long | client_ip:ip multiIndexIpStringRenameStats required_capability: union_types +required_capability: casting_operator +required_capability: union_types_remove_fields FROM sample_data, sample_data_str -| EVAL host_ip = TO_IP(client_ip) +| EVAL host_ip = client_ip::ip | STATS count=count(*) BY host_ip | SORT count DESC, host_ip ASC | KEEP count, host_ip @@ -225,6 +284,7 @@ count:long | host_ip:ip multiIndexIpStringRenameToStringStats required_capability: union_types +required_capability: union_types_remove_fields FROM sample_data, sample_data_str | EVAL host_ip = TO_STRING(TO_IP(client_ip)) @@ -240,6 +300,24 @@ count:long | host_ip:keyword 2 | 172.21.2.162 ; +multiIndexIpStringStatsDrop +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_str +| STATS count=count(*) BY client_ip::ip +| KEEP count +| SORT count DESC +; + +count:long +8 +2 +2 +2 +; + multiIndexIpStringStatsInline required_capability: union_types required_capability: union_types_inline_fix @@ -257,8 +335,42 @@ count:long | client_ip:ip 2 | 172.21.2.162 ; +multiIndexIpStringStatsInline2 +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_str +| STATS count=count(*) BY client_ip::ip +| SORT count DESC, `client_ip::ip` ASC +; + +count:long | client_ip::ip:ip +8 | 172.21.3.15 +2 | 172.21.0.5 +2 | 172.21.2.113 +2 | 172.21.2.162 +; + +multiIndexIpStringStatsInline3 +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_str +| STATS count=count(*) BY client_ip::ip +| STATS mc=count(count) BY count +| SORT mc DESC, count ASC +; + +mc:l | count:l +3 | 2 +1 | 8 +; + multiIndexWhereIpStringStats required_capability: union_types +required_capability: union_types_remove_fields FROM sample_data, sample_data_str | WHERE STARTS_WITH(TO_STRING(client_ip), "172.21.2") @@ -275,6 +387,7 @@ count:long | message:keyword multiIndexTsLong required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data, 
sample_data_ts_long METADATA _index | EVAL @timestamp = TO_DATETIME(@timestamp) @@ -302,6 +415,7 @@ sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 multiIndexTsLongRename required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data, sample_data_ts_long METADATA _index | EVAL ts = TO_DATETIME(@timestamp) @@ -329,6 +443,7 @@ sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 multiIndexTsLongRenameToString required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data, sample_data_ts_long METADATA _index | EVAL ts = TO_STRING(TO_DATETIME(@timestamp)) @@ -356,6 +471,7 @@ sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 multiIndexWhereTsLong required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data, sample_data_ts_long METADATA _index | WHERE TO_LONG(@timestamp) < 1698068014937 @@ -372,6 +488,7 @@ sample_data_ts_long | 172.21.2.162 | 3450233 | Connected to 10. multiIndexTsLongStats required_capability: union_types +required_capability: union_types_remove_fields FROM sample_data, sample_data_ts_long | EVAL @timestamp = DATE_TRUNC(1 hour, TO_DATETIME(@timestamp)) @@ -385,8 +502,80 @@ count:long | @timestamp:date 4 | 2023-10-23T12:00:00.000Z ; +multiIndexTsLongStatsDrop +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_ts_long +| STATS count=count(*) BY @timestamp::datetime +| KEEP count +; + +count:long +2 +2 +2 +2 +2 +2 +2 +; + +multiIndexTsLongStatsInline2 +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_ts_long +| STATS count=count(*) BY @timestamp::datetime +| SORT count DESC, `@timestamp::datetime` DESC +; + +count:long | @timestamp::datetime:datetime +2 | 2023-10-23T13:55:01.543Z +2 | 2023-10-23T13:53:55.832Z +2 | 2023-10-23T13:52:55.015Z +2 | 2023-10-23T13:51:54.732Z +2 | 2023-10-23T13:33:34.937Z +2 | 2023-10-23T12:27:28.948Z +2 | 2023-10-23T12:15:03.360Z +; + +multiIndexTsLongStatsInline3 +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_ts_long +| STATS count=count(*) BY @timestamp::datetime +| STATS mc=count(count) BY count +| SORT mc DESC, count ASC +; + +mc:l | count:l +7 | 2 +; + +multiIndexTsLongStatsStats +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_long +| EVAL ts = TO_STRING(@timestamp) +| STATS count = COUNT(*) BY ts +| STATS mc = COUNT(count) BY count +| SORT mc DESC, count ASC +; + +mc:l | count:l +14 | 1 +; + multiIndexTsLongRenameStats required_capability: union_types +required_capability: union_types_remove_fields FROM sample_data, sample_data_ts_long | EVAL hour = DATE_TRUNC(1 hour, TO_DATETIME(@timestamp)) @@ -402,6 +591,7 @@ count:long | hour:date multiIndexTsLongRenameToDatetimeToStringStats required_capability: union_types +required_capability: union_types_remove_fields FROM sample_data, sample_data_ts_long | EVAL hour = LEFT(TO_STRING(TO_DATETIME(@timestamp)), 13) @@ -417,6 +607,7 @@ count:long | hour:keyword multiIndexTsLongRenameToStringStats required_capability: union_types 
+required_capability: union_types_remove_fields FROM sample_data, sample_data_ts_long | EVAL mess = LEFT(TO_STRING(@timestamp), 7) @@ -435,6 +626,7 @@ count:long | mess:keyword multiIndexTsLongStatsInline required_capability: union_types +required_capability: union_types_remove_fields FROM sample_data, sample_data_ts_long | STATS count=COUNT(*), max=MAX(TO_DATETIME(@timestamp)) @@ -459,6 +651,7 @@ count:long multiIndexWhereTsLongStats required_capability: union_types +required_capability: union_types_remove_fields FROM sample_data, sample_data_ts_long | WHERE TO_LONG(@timestamp) < 1698068014937 @@ -475,6 +668,7 @@ count:long | message:keyword multiIndexIpStringTsLong required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data* METADATA _index | EVAL @timestamp = TO_DATETIME(@timestamp), client_ip = TO_IP(client_ip) @@ -543,6 +737,7 @@ sample_data_ts_long | 8268153 | Connection error multiIndexIpStringTsLongRename required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data* METADATA _index | EVAL ts = TO_DATETIME(@timestamp), host_ip = TO_IP(client_ip) @@ -611,6 +806,7 @@ sample_data_ts_long | 8268153 | Connection error multiIndexIpStringTsLongRenameToString required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data* METADATA _index | EVAL ts = TO_STRING(TO_DATETIME(@timestamp)), host_ip = TO_STRING(TO_IP(client_ip)) @@ -645,6 +841,7 @@ sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 multiIndexWhereIpStringTsLong required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data* METADATA _index | WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) == "172.21.2.162" @@ -660,6 +857,7 @@ sample_data_ts_long | 3450233 | Connected to 10.1.0.3 multiIndexWhereIpStringTsLongStats required_capability: union_types +required_capability: union_types_remove_fields FROM sample_data* | WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) == "172.21.2.162" @@ -675,6 +873,7 @@ count:long | message:keyword multiIndexWhereIpStringLikeTsLong required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data* METADATA _index | WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) LIKE "172.21.2.16?" @@ -690,6 +889,7 @@ sample_data_ts_long | 3450233 | Connected to 10.1.0.3 multiIndexWhereIpStringLikeTsLongStats required_capability: union_types +required_capability: union_types_remove_fields FROM sample_data* | WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) LIKE "172.21.2.16?" 
@@ -705,6 +905,7 @@ count:long | message:keyword multiIndexMultiColumnTypesRename required_capability: union_types required_capability: metadata_fields +required_capability: union_types_remove_fields FROM sample_data* METADATA _index | WHERE event_duration > 8000000 @@ -717,3 +918,39 @@ null | null | 8268153 | Connection error | samp null | null | 8268153 | Connection error | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 null | null | 8268153 | Connection error | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 ; + +multiIndexMultiColumnTypesRenameAndKeep +required_capability: union_types +required_capability: metadata_fields +required_capability: union_types_remove_fields + +FROM sample_data* METADATA _index +| WHERE event_duration > 8000000 +| EVAL ts = TO_DATETIME(@timestamp), ts_str = TO_STRING(@timestamp), ts_l = TO_LONG(@timestamp), ip = TO_IP(client_ip), ip_str = TO_STRING(client_ip) +| KEEP _index, ts, ts_str, ts_l, ip, ip_str, event_duration +| SORT _index ASC, ts DESC +; + +_index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k | event_duration:long +sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +; + +multiIndexMultiColumnTypesRenameAndDrop +required_capability: union_types +required_capability: metadata_fields +required_capability: union_types_remove_fields + +FROM sample_data* METADATA _index +| WHERE event_duration > 8000000 +| EVAL ts = TO_DATETIME(@timestamp), ts_str = TO_STRING(@timestamp), ts_l = TO_LONG(@timestamp), ip = TO_IP(client_ip), ip_str = TO_STRING(client_ip) +| DROP @timestamp, client_ip, message +| SORT _index ASC, ts DESC +; + +event_duration:long | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k +8268153 | sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +8268153 | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +8268153 | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 8f24cd113a056..d361a0f9ebd3d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -108,6 +108,21 @@ public enum Cap { */ AGG_WEIGHTED_AVG, + /** + * Fix for union-types when aggregating over an inline conversion with casting operator. Done in #110476. + */ + UNION_TYPES_AGG_CAST, + + /** + * Fix for union-types when aggregating over an inline conversion with conversion function. Done in #110652. + */ + UNION_TYPES_INLINE_FIX, + + /** + * Fix for union-types when sorting a type-casted field. We changed how we remove synthetic union-types fields. 
+ */ + UNION_TYPES_REMOVE_FIELDS, + /** * Fix a parsing issue where numbers below Long.MIN_VALUE threw an exception instead of parsing as doubles. * see Parsing large numbers is inconsistent #104323 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 4fcd37faa311a..21203f8dbb3dd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -42,6 +42,7 @@ import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; +import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.core.rule.RuleExecutor; import org.elasticsearch.xpack.esql.core.session.Configuration; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -63,6 +64,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.DateTimeArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.optimizer.rules.SubstituteSurrogates; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Drop; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -141,7 +143,7 @@ public class Analyzer extends ParameterizedRuleExecutor("Finish Analysis", Limiter.ONCE, new AddImplicitLimit(), new UnresolveUnionTypes()); + var finish = new Batch<>("Finish Analysis", Limiter.ONCE, new AddImplicitLimit(), new UnionTypesCleanup()); rules = List.of(init, resolution, finish); } @@ -217,13 +219,13 @@ private static List mappingAsAttributes(Source source, Map list, Source source, String parentName, Map mapping) { + private static void mappingAsAttributes(List list, Source source, FieldAttribute parent, Map mapping) { for (Map.Entry entry : mapping.entrySet()) { String name = entry.getKey(); EsField t = entry.getValue(); if (t != null) { - name = parentName == null ? name : parentName + "." + name; + name = parent == null ? name : parent.fieldName() + "." + name; var fieldProperties = t.getProperties(); var type = t.getDataType().widenSmallNumeric(); // due to a bug also copy the field since the Attribute hierarchy extracts the data type @@ -232,19 +234,16 @@ private static void mappingAsAttributes(List list, Source source, Str t = new EsField(t.getName(), type, t.getProperties(), t.isAggregatable(), t.isAlias()); } + FieldAttribute attribute = t instanceof UnsupportedEsField uef + ? 
new UnsupportedAttribute(source, name, uef) + : new FieldAttribute(source, parent, name, t); // primitive branch if (EsqlDataTypes.isPrimitive(type)) { - Attribute attribute; - if (t instanceof UnsupportedEsField uef) { - attribute = new UnsupportedAttribute(source, name, uef); - } else { - attribute = new FieldAttribute(source, null, name, t); - } list.add(attribute); } // allow compound object even if they are unknown (but not NESTED) if (type != NESTED && fieldProperties.isEmpty() == false) { - mappingAsAttributes(list, source, name, fieldProperties); + mappingAsAttributes(list, source, attribute, fieldProperties); } } } @@ -1087,38 +1086,51 @@ protected LogicalPlan doRule(LogicalPlan plan) { return plan; } - // Otherwise drop the converted attributes after the alias function, as they are only needed for this function, and - // the original version of the attribute should still be seen as unconverted. - plan = dropConvertedAttributes(plan, unionFieldAttributes); + // In ResolveRefs the aggregates are resolved from the groupings, which might have an unresolved MultiTypeEsField. + // Now that we have resolved those, we need to re-resolve the aggregates. + if (plan instanceof EsqlAggregate agg) { + // If the union-types resolution occurred in a child of the aggregate, we need to check the groupings + plan = agg.transformExpressionsOnly(FieldAttribute.class, UnionTypesCleanup::checkUnresolved); + + // Aggregates where the grouping key comes from a union-type field need to be resolved against the grouping key + Map resolved = new HashMap<>(); + for (Expression e : agg.groupings()) { + Attribute attr = Expressions.attribute(e); + if (attr != null && attr.resolved()) { + resolved.put(attr, e); + } + } + plan = plan.transformExpressionsOnly(UnresolvedAttribute.class, ua -> resolveAttribute(ua, resolved)); + } // And add generated fields to EsRelation, so these new attributes will appear in the OutputExec of the Fragment // and thereby get used in FieldExtractExec plan = plan.transformDown(EsRelation.class, esr -> { - List output = esr.output(); List missing = new ArrayList<>(); for (FieldAttribute fa : unionFieldAttributes) { - if (output.stream().noneMatch(a -> a.id().equals(fa.id()))) { + // Using outputSet().contains looks by NameId, resp. uses semanticEquals. + if (esr.outputSet().contains(fa) == false) { missing.add(fa); } } + if (missing.isEmpty() == false) { - output.addAll(missing); - return new EsRelation(esr.source(), esr.index(), output, esr.indexMode(), esr.frozen()); + List newOutput = new ArrayList<>(esr.output()); + newOutput.addAll(missing); + return new EsRelation(esr.source(), esr.index(), newOutput, esr.indexMode(), esr.frozen()); } return esr; }); return plan; } - private LogicalPlan dropConvertedAttributes(LogicalPlan plan, List unionFieldAttributes) { - List projections = new ArrayList<>(plan.output()); - for (var e : unionFieldAttributes) { - projections.removeIf(p -> p.id().equals(e.id())); - } - if (projections.size() != plan.output().size()) { - return new EsqlProject(plan.source(), plan, projections); - } - return plan; + private Expression resolveAttribute(UnresolvedAttribute ua, Map resolved) { + var named = resolveAgainstList(ua, resolved.keySet()); + return switch (named.size()) { + case 0 -> ua; + case 1 -> named.get(0).equals(ua) ? 
ua : resolved.get(named.get(0)); + default -> ua.withUnresolvedMessage("Resolved [" + ua + "] unexpectedly to multiple attributes " + named); + }; } private Expression resolveConvertFunction(AbstractConvertFunction convert, List unionFieldAttributes) { @@ -1149,7 +1161,13 @@ private Expression createIfDoesNotAlreadyExist( MultiTypeEsField resolvedField, List unionFieldAttributes ) { - var unionFieldAttribute = new FieldAttribute(fa.source(), fa.name(), resolvedField); // Generates new ID for the field + // Generate new ID for the field and suffix it with the data type to maintain unique attribute names. + String unionTypedFieldName = SubstituteSurrogates.rawTemporaryName( + fa.name(), + "converted_to", + resolvedField.getDataType().typeName() + ); + FieldAttribute unionFieldAttribute = new FieldAttribute(fa.source(), fa.parent(), unionTypedFieldName, resolvedField); int existingIndex = unionFieldAttributes.indexOf(unionFieldAttribute); if (existingIndex >= 0) { // Do not generate multiple name/type combinations with different IDs @@ -1182,32 +1200,53 @@ private Expression typeSpecificConvert(AbstractConvertFunction convert, Source s } /** - * If there was no AbstractConvertFunction that resolved multi-type fields in the ResolveUnionTypes rules, - * then there could still be some FieldAttributes that contain unresolved MultiTypeEsFields. - * These need to be converted back to actual UnresolvedAttribute in order for validation to generate appropriate failures. + * {@link ResolveUnionTypes} creates new, synthetic attributes for union types: + * If there was no {@code AbstractConvertFunction} that resolved multi-type fields in the {@link ResolveUnionTypes} rule, + * then there could still be some {@code FieldAttribute}s that contain unresolved {@link MultiTypeEsField}s. + * These need to be converted back to actual {@code UnresolvedAttribute} in order for validation to generate appropriate failures. + *
<p>
+ * Finally, if {@code client_ip} is present in 2 indices, once with type {@code ip} and once with type {@code keyword}, + * using {@code EVAL x = to_ip(client_ip)} will create a single attribute @{code $$client_ip$converted_to$ip}. + * This should not spill into the query output, so we drop such attributes at the end. */ - private static class UnresolveUnionTypes extends AnalyzerRules.AnalyzerRule { - @Override - protected boolean skipResolved() { - return false; - } + private static class UnionTypesCleanup extends Rule { + public LogicalPlan apply(LogicalPlan plan) { + LogicalPlan planWithCheckedUnionTypes = plan.transformUp(LogicalPlan.class, p -> { + if (p instanceof EsRelation esRelation) { + // Leave esRelation as InvalidMappedField so that UNSUPPORTED fields can still pass through + return esRelation; + } + return p.transformExpressionsOnly(FieldAttribute.class, UnionTypesCleanup::checkUnresolved); + }); - @Override - protected LogicalPlan rule(LogicalPlan plan) { - if (plan instanceof EsRelation esRelation) { - // Leave esRelation as InvalidMappedField so that UNSUPPORTED fields can still pass through - return esRelation; - } - return plan.transformExpressionsOnly(FieldAttribute.class, UnresolveUnionTypes::checkUnresolved); + // To drop synthetic attributes at the end, we need to compute the plan's output. + // This is only legal to do if the plan is resolved. + return planWithCheckedUnionTypes.resolved() + ? planWithoutSyntheticAttributes(planWithCheckedUnionTypes) + : planWithCheckedUnionTypes; } - private static Attribute checkUnresolved(FieldAttribute fa) { - var field = fa.field(); - if (field instanceof InvalidMappedField imf) { + static Attribute checkUnresolved(FieldAttribute fa) { + if (fa.field() instanceof InvalidMappedField imf) { String unresolvedMessage = "Cannot use field [" + fa.name() + "] due to ambiguities being " + imf.errorMessage(); return new UnresolvedAttribute(fa.source(), fa.name(), fa.qualifier(), fa.id(), unresolvedMessage, null); } return fa; } + + private static LogicalPlan planWithoutSyntheticAttributes(LogicalPlan plan) { + List output = plan.output(); + List newOutput = new ArrayList<>(output.size()); + + for (Attribute attr : output) { + // TODO: this should really use .synthetic() + // https://github.com/elastic/elasticsearch/issues/105821 + if (attr.name().startsWith(FieldAttribute.SYNTHETIC_ATTRIBUTE_NAME_PREFIX) == false) { + newOutput.add(attr); + } + } + + return newOutput.size() == output.size() ? plan : new Project(Source.EMPTY, plan, newOutput); + } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java index 22c4aa9c6bf07..a553361f60a18 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java @@ -104,6 +104,13 @@ public UnsupportedEsField field() { return (UnsupportedEsField) super.field(); } + @Override + public String fieldName() { + // The super fieldName uses parents to compute the path; this class ignores parents, so we need to rely on the name instead. + // Using field().getName() would be wrong: for subfields like parent.subfield that would return only the last part, subfield. 
+ return name(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, UnsupportedAttribute::new, name(), field(), hasCustomMessage ? message : null, id()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java index ba5e8316a666c..05554a0756a9d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java @@ -140,7 +140,8 @@ else if (plan instanceof Project project) { Map nullLiteral = Maps.newLinkedHashMapWithExpectedSize(DataType.types().size()); for (NamedExpression projection : projections) { - if (projection instanceof FieldAttribute f && stats.exists(f.qualifiedName()) == false) { + // Do not use the attribute name, this can deviate from the field name for union types. + if (projection instanceof FieldAttribute f && stats.exists(f.fieldName()) == false) { DataType dt = f.dataType(); Alias nullAlias = nullLiteral.get(f.dataType()); // save the first field as null (per datatype) @@ -170,7 +171,8 @@ else if (plan instanceof Project project) { || plan instanceof TopN) { plan = plan.transformExpressionsOnlyUp( FieldAttribute.class, - f -> stats.exists(f.qualifiedName()) ? f : Literal.of(f, null) + // Do not use the attribute name, this can deviate from the field name for union types. + f -> stats.exists(f.fieldName()) ? f : Literal.of(f, null) ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java index fa4049b0e5a3a..b734a72ef5e22 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.EmptyAttribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; @@ -141,7 +142,7 @@ public static String temporaryName(Expression inner, Expression outer, int suffi } public static String rawTemporaryName(String inner, String outer, String suffix) { - return "$$" + inner + "$" + outer + "$" + suffix; + return FieldAttribute.SYNTHETIC_ATTRIBUTE_NAME_PREFIX + inner + "$" + outer + "$" + suffix; } static int TO_STRING_LIMIT = 16; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java index 08916c14e91bf..726b35c90f4d6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java @@ -99,7 +99,7 @@ public boolean expressionsResolved() { @Override public int hashCode() { - return Objects.hash(index, indexMode, frozen); + return 
Objects.hash(index, indexMode, frozen, attrs); } @Override @@ -113,7 +113,10 @@ public boolean equals(Object obj) { } EsRelation other = (EsRelation) obj; - return Objects.equals(index, other.index) && indexMode == other.indexMode() && frozen == other.frozen; + return Objects.equals(index, other.index) + && indexMode == other.indexMode() + && frozen == other.frozen + && Objects.equals(attrs, other.attrs); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index 9e1e1a50fe8f0..0c1928c7c9845 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -116,7 +116,8 @@ public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fi DataType dataType = attr.dataType(); MappedFieldType.FieldExtractPreference fieldExtractPreference = PlannerUtils.extractPreference(docValuesAttrs.contains(attr)); ElementType elementType = PlannerUtils.toElementType(dataType, fieldExtractPreference); - String fieldName = attr.name(); + // Do not use the field attribute name, this can deviate from the field name for union types. + String fieldName = attr instanceof FieldAttribute fa ? fa.fieldName() : attr.name(); boolean isUnsupported = dataType == DataType.UNSUPPORTED; IntFunction loader = s -> getBlockLoaderFor(s, fieldName, isUnsupported, fieldExtractPreference, unionTypes); fields.add(new ValuesSourceReaderOperator.FieldInfo(fieldName, elementType, loader)); @@ -233,8 +234,11 @@ public final Operator.OperatorFactory ordinalGroupingOperatorFactory( // The grouping-by values are ready, let's group on them directly. // Costin: why are they ready and not already exposed in the layout? boolean isUnsupported = attrSource.dataType() == DataType.UNSUPPORTED; + var unionTypes = findUnionTypes(attrSource); + // Do not use the field attribute name, this can deviate from the field name for union types. + String fieldName = attrSource instanceof FieldAttribute fa ? 
fa.fieldName() : attrSource.name(); return new OrdinalsGroupingOperator.OrdinalsGroupingOperatorFactory( - shardIdx -> shardContexts.get(shardIdx).blockLoader(attrSource.name(), isUnsupported, NONE), + shardIdx -> getBlockLoaderFor(shardIdx, fieldName, isUnsupported, NONE, unionTypes), vsShardContexts, groupElementType, docChannel, @@ -434,12 +438,13 @@ public StoredFieldsSpec rowStrideStoredFieldSpec() { @Override public boolean supportsOrdinals() { - return delegate.supportsOrdinals(); + // Fields with mismatching types cannot use ordinals for uniqueness determination, but must convert the values first + return false; } @Override - public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { - return delegate.ordinals(context); + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new IllegalArgumentException("Ordinals are not supported for type conversion"); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index de5d734c559d3..e7a999b892f44 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -5507,9 +5507,11 @@ METRICS k8s count(to_long(network.total_bytes_in)) BY bucket(@timestamp, 1 minut EsRelation relation = as(eval.child(), EsRelation.class); assertThat(relation.indexMode(), equalTo(IndexMode.STANDARD)); } - for (int i = 1; i < plans.size(); i++) { - assertThat(plans.get(i), equalTo(plans.get(0))); - } + // TODO: Unmute this part + // https://github.com/elastic/elasticsearch/issues/110827 + // for (int i = 1; i < plans.size(); i++) { + // assertThat(plans.get(i), equalTo(plans.get(0))); + // } } public void testRateInStats() { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml index f3403ca8751c0..003b1d0651d11 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml @@ -4,8 +4,8 @@ setup: - method: POST path: /_query parameters: [method, path, parameters, capabilities] - capabilities: [union_types] - reason: "Union types introduced in 8.15.0" + capabilities: [union_types, union_types_remove_fields, casting_operator] + reason: "Union types and casting operator introduced in 8.15.0" test_runner_features: [capabilities, allowed_warnings_regex] - do: @@ -147,6 +147,9 @@ setup: - '{"index": {}}' - '{"@timestamp": "2023-10-23T12:15:03.360Z", "client_ip": "172.21.2.162", "event_duration": "3450233", "message": "Connected to 10.1.0.3"}' +############################################################################################################ +# Test a single index as a control of the expected results + --- load single index ip_long: - do: @@ -173,9 +176,6 @@ load single index ip_long: - match: { values.0.3: 1756467 } - match: { values.0.4: "Connected to 10.1.0.1" } -############################################################################################################ -# Test a single index as a control of the expected results - --- load single index keyword_keyword: - do: @@ -202,6 +202,62 @@ load single index 
keyword_keyword: - match: { values.0.3: "1756467" } - match: { values.0.4: "Connected to 10.1.0.1" } +--- +load single index ip_long and aggregate by client_ip: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_long | STATS count = COUNT(*) BY client_ip::ip | SORT count DESC, `client_ip::ip` ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "client_ip::ip" } + - match: { columns.1.type: "ip" } + - length: { values: 4 } + - match: { values.0.0: 4 } + - match: { values.0.1: "172.21.3.15" } + - match: { values.1.0: 1 } + - match: { values.1.1: "172.21.0.5" } + - match: { values.2.0: 1 } + - match: { values.2.1: "172.21.2.113" } + - match: { values.3.0: 1 } + - match: { values.3.1: "172.21.2.162" } + +--- +load single index ip_long and aggregate client_ip my message: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_long | STATS count = COUNT(client_ip::ip) BY message | SORT count DESC, message ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "message" } + - match: { columns.1.type: "keyword" } + - length: { values: 5 } + - match: { values.0.0: 3 } + - match: { values.0.1: "Connection error" } + - match: { values.1.0: 1 } + - match: { values.1.1: "Connected to 10.1.0.1" } + - match: { values.2.0: 1 } + - match: { values.2.1: "Connected to 10.1.0.2" } + - match: { values.3.0: 1 } + - match: { values.3.1: "Connected to 10.1.0.3" } + - match: { values.4.0: 1 } + - match: { values.4.1: "Disconnected" } + +--- +load single index ip_long stats invalid grouping: + - do: + catch: '/Unknown column \[x\]/' + esql.query: + body: + query: 'FROM events_ip_long | STATS count = COUNT(client_ip::ip) BY x' + ############################################################################################################ # Test two indices where the event_duration is mapped as a LONG and as a KEYWORD @@ -512,6 +568,62 @@ load two indices, convert, rename but not drop ambiguous field client_ip: - match: { values.1.5: "172.21.3.15" } - match: { values.1.6: "172.21.3.15" } +--- +load two indexes and group by converted client_ip: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_*_long | STATS count = COUNT(*) BY client_ip::ip | SORT count DESC, `client_ip::ip` ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "client_ip::ip" } + - match: { columns.1.type: "ip" } + - length: { values: 4 } + - match: { values.0.0: 8 } + - match: { values.0.1: "172.21.3.15" } + - match: { values.1.0: 2 } + - match: { values.1.1: "172.21.0.5" } + - match: { values.2.0: 2 } + - match: { values.2.1: "172.21.2.113" } + - match: { values.3.0: 2 } + - match: { values.3.1: "172.21.2.162" } + +--- +load two indexes and aggregate converted client_ip: + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_*_long | STATS count = COUNT(client_ip::ip) BY message | SORT count DESC, message ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "message" } + - match: { columns.1.type: "keyword" } + - length: { values: 5 } + - match: { values.0.0: 6 } + - match: { values.0.1: 
"Connection error" } + - match: { values.1.0: 2 } + - match: { values.1.1: "Connected to 10.1.0.1" } + - match: { values.2.0: 2 } + - match: { values.2.1: "Connected to 10.1.0.2" } + - match: { values.3.0: 2 } + - match: { values.3.1: "Connected to 10.1.0.3" } + - match: { values.4.0: 2 } + - match: { values.4.1: "Disconnected" } + +--- +load two indexes, convert client_ip and group by something invalid: + - do: + catch: '/Unknown column \[x\]/' + esql.query: + body: + query: 'FROM events_*_long | STATS count = COUNT(client_ip::ip) BY x' + ############################################################################################################ # Test four indices with both the client_IP (IP and KEYWORD) and event_duration (LONG and KEYWORD) mappings diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/161_union_types_subfields.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/161_union_types_subfields.yml index 99bd1d6508895..ccf6512ca1ff7 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/161_union_types_subfields.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/161_union_types_subfields.yml @@ -4,7 +4,7 @@ setup: - method: POST path: /_query parameters: [ method, path, parameters, capabilities ] - capabilities: [ union_types ] + capabilities: [ union_types, union_types_remove_fields ] reason: "Union types introduced in 8.15.0" test_runner_features: [ capabilities, allowed_warnings_regex ] From 016360a4ec6bed52a86724425cc3282f0d0b2a4d Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Tue, 16 Jul 2024 04:04:47 -0600 Subject: [PATCH 064/406] (Doc+) Error "number of documents in the index can't exceed" (#110449) (#110910) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * (Doc+) Error "number of documents in the index can't exceed" 👋 howdy, team! This adds resolution outline for error ... which induces ongoing, lowkey support ``` Number of documents in the index can't exceed [2147483519] ``` * feedback * feedback * feedback * feedback * feedback * Test change to address docs check failure * Revert test change * Test docs check --------- Co-authored-by: David Turner Co-authored-by: Liam Thompson <32779855+leemthompo@users.noreply.github.com> --- .../how-to/size-your-shards.asciidoc | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 56e5fbbf15c77..53f47fc88cdb2 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -544,3 +544,36 @@ PUT _cluster/settings } } ---- + +[discrete] +==== Number of documents in the shard cannot exceed [2147483519] + + +Elasticsearch shards reflect Lucene's underlying https://github.com/apache/lucene/issues/5176[index +`MAX_DOC` hard limit] of 2,147,483,519 (`(2^31)-129`) docs. This figure is +the sum of `docs.count` plus `docs.deleted` as reported by the <> +per shard. Exceeding this limit will result in errors like the following: + +[source,txt] +---- +Elasticsearch exception [type=illegal_argument_exception, reason=Number of documents in the shard cannot exceed [2147483519]] +---- + +TIP: This calculation may differ from the <> calculation, because the Count API does not include nested documents. + + +Try using the <> to clear deleted docs. 
For example: + +[source,console] +---- +POST my-index-000001/_forcemerge?only_expunge_deletes=true +---- +// TEST[setup:my_index] + +This will launch an asynchronous task which can be monitored via the <>. + +For a long-term solution try: + +* <> +* aligning the index to recommendations on this page by either +<> or <> the index From 033c92ecebe230315f64c0249fd83a63c0c3e26f Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Tue, 16 Jul 2024 12:32:13 +0200 Subject: [PATCH 065/406] Union types documentation (#110183) (#110912) * Union types documentation * Try remove asciidoc error * Another attempt * Using literal block * Nicer formatting * Remove partintro * Small refinements * Edits for clarity and style --------- Co-authored-by: Marci W <333176+marciw@users.noreply.github.com> --- docs/reference/esql/esql-limitations.asciidoc | 5 + docs/reference/esql/esql-multi-index.asciidoc | 175 ++++++++++++++++++ docs/reference/esql/esql-using.asciidoc | 4 + .../esql/source-commands/from.asciidoc | 10 +- 4 files changed, 188 insertions(+), 6 deletions(-) create mode 100644 docs/reference/esql/esql-multi-index.asciidoc diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index 11e3fd7ae9883..8accc7550edbb 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -85,6 +85,11 @@ Some <> are not supported in all contexts: ** `cartesian_point` ** `cartesian_shape` +In addition, when <>, +it's possible for the same field to be mapped to multiple types. +These fields cannot be directly used in queries or returned in results, +unless they're <>. + [discrete] [[esql-_source-availability]] === _source availability diff --git a/docs/reference/esql/esql-multi-index.asciidoc b/docs/reference/esql/esql-multi-index.asciidoc new file mode 100644 index 0000000000000..41ff6a27417b1 --- /dev/null +++ b/docs/reference/esql/esql-multi-index.asciidoc @@ -0,0 +1,175 @@ +[[esql-multi-index]] +=== Using {esql} to query multiple indices +++++ +Using {esql} to query multiple indices +++++ + +With {esql}, you can execute a single query across multiple indices, data streams, or aliases. +To do so, use wildcards and date arithmetic. The following example uses a comma-separated list and a wildcard: + +[source,esql] +---- +FROM employees-00001,other-employees-* +---- + +Use the format `:` to <>: + +[source,esql] +---- +FROM cluster_one:employees-00001,cluster_two:other-employees-* +---- + +[discrete] +[[esql-multi-index-invalid-mapping]] +=== Field type mismatches + +When querying multiple indices, data streams, or aliases, you might find that the same field is mapped to multiple different types. 
+For example, consider the two indices with the following field mappings: + +*index: events_ip* +``` +{ + "mappings": { + "properties": { + "@timestamp": { "type": "date" }, + "client_ip": { "type": "ip" }, + "event_duration": { "type": "long" }, + "message": { "type": "keyword" } + } + } +} +``` + +*index: events_keyword* +``` +{ + "mappings": { + "properties": { + "@timestamp": { "type": "date" }, + "client_ip": { "type": "keyword" }, + "event_duration": { "type": "long" }, + "message": { "type": "keyword" } + } + } +} +``` + +When you query each of these individually with a simple query like `FROM events_ip`, the results are provided with type-specific columns: + +[source.merge.styled,esql] +---- +FROM events_ip +| SORT @timestamp DESC +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +@timestamp:date | client_ip:ip | event_duration:long | message:keyword +2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +|=== + +Note how the `client_ip` column is correctly identified as type `ip`, and all values are displayed. +However, if instead the query sources two conflicting indices with `FROM events_*`, the type of the `client_ip` column cannot be determined +and is reported as `unsupported` with all values returned as `null`. + +[[query-unsupported]] +[source.merge.styled,esql] +---- +FROM events_* +| SORT @timestamp DESC +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +@timestamp:date | client_ip:unsupported | event_duration:long | message:keyword +2023-10-23T13:55:01.543Z | null | 1756467 | Connected to 10.1.0.1 +2023-10-23T13:53:55.832Z | null | 5033755 | Connection error +2023-10-23T13:52:55.015Z | null | 8268153 | Connection error +2023-10-23T13:51:54.732Z | null | 725448 | Connection error +2023-10-23T13:33:34.937Z | null | 1232382 | Disconnected +2023-10-23T12:27:28.948Z | null | 2764889 | Connected to 10.1.0.2 +2023-10-23T12:15:03.360Z | null | 3450233 | Connected to 10.1.0.3 +|=== + +In addition, if the query refers to this unsupported field directly, the query fails: + +[source.merge.styled,esql] +---- +FROM events_* +| KEEP @timestamp, client_ip, event_duration, message +| SORT @timestamp DESC +---- + +[source,bash] +---- +Cannot use field [client_ip] due to ambiguities being mapped as +[2] incompatible types: + [ip] in [events_ip], + [keyword] in [events_keyword] +---- + +[discrete] +[[esql-multi-index-union-types]] +=== Union types + +{esql} has a way to handle <>. When the same field is mapped to multiple types in multiple indices, +the type of the field is understood to be a _union_ of the various types in the index mappings. +As seen in the preceding examples, this _union type_ cannot be used in the results, +and cannot be referred to by the query +-- except when it's passed to a type conversion function that accepts all the types in the _union_ and converts the field +to a single type. {esql} offers a suite of <> to achieve this. + +In the above examples, the query can use a command like `EVAL client_ip = TO_IP(client_ip)` to resolve +the union of `ip` and `keyword` to just `ip`. +You can also use the type-conversion syntax `EVAL client_ip = client_ip::IP`. +Alternatively, the query could use <> to convert all supported types into `KEYWORD`. 
+ +For example, the <> that returned `client_ip:unsupported` with `null` values can be improved using the `TO_IP` function or the equivalent `field::ip` syntax. +These changes also resolve the error message. +As long as the only reference to the original field is to pass it to a conversion function that resolves the type ambiguity, no error results. + +[source.merge.styled,esql] +---- +FROM events_* +| EVAL client_ip = TO_IP(client_ip) +| KEEP @timestamp, client_ip, event_duration, message +| SORT @timestamp DESC +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +@timestamp:date | client_ip:ip | event_duration:long | message:keyword +2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +|=== + +[discrete] +[[esql-multi-index-index-metadata]] +=== Index metadata + +It can be helpful to know the particular index from which each row is sourced. +To get this information, use the <> option on the <> command. + +[source.merge.styled,esql] +---- +FROM events_* METADATA _index +| EVAL client_ip = TO_IP(client_ip) +| KEEP _index, @timestamp, client_ip, event_duration, message +| SORT @timestamp DESC +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +_index:keyword | @timestamp:date | client_ip:ip | event_duration:long | message:keyword +events_ip | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +events_ip | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +events_ip | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +events_keyword | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +events_keyword | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +events_keyword | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +events_keyword | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +|=== diff --git a/docs/reference/esql/esql-using.asciidoc b/docs/reference/esql/esql-using.asciidoc index 3e045163069ec..d2e18bf1b91a3 100644 --- a/docs/reference/esql/esql-using.asciidoc +++ b/docs/reference/esql/esql-using.asciidoc @@ -12,6 +12,9 @@ and set up alerts. Using {esql} in {elastic-sec} to investigate events in Timeline, create detection rules, and build {esql} queries using Elastic AI Assistant. +<>:: +Using {esql} to query multiple indexes and resolve field type mismatches. + <>:: Using {esql} to query across multiple clusters. @@ -21,5 +24,6 @@ Using the <> to list and cancel {esql} queries. 
include::esql-rest.asciidoc[] include::esql-kibana.asciidoc[] include::esql-security-solution.asciidoc[] +include::esql-multi-index.asciidoc[] include::esql-across-clusters.asciidoc[] include::task-management.asciidoc[] diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc index 9ab21e8996aa0..1abe7dcb2fa9b 100644 --- a/docs/reference/esql/source-commands/from.asciidoc +++ b/docs/reference/esql/source-commands/from.asciidoc @@ -58,24 +58,22 @@ today's index: FROM ---- -Use comma-separated lists or wildcards to query multiple data streams, indices, -or aliases: +Use comma-separated lists or wildcards to <>: [source,esql] ---- FROM employees-00001,other-employees-* ---- -Use the format `:` to query data streams and indices -on remote clusters: +Use the format `:` to <>: [source,esql] ---- FROM cluster_one:employees-00001,cluster_two:other-employees-* ---- -See <>. - Use the optional `METADATA` directive to enable <>: [source,esql] From 281d529c033892d3507b21b8809560688bdfe234 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 16 Jul 2024 14:15:18 +0100 Subject: [PATCH 066/406] Improve MAX_DOCS troubleshooting docs (#110920) Backports the docs changes from #110911 --- .../how-to/size-your-shards.asciidoc | 32 +++++++++++-------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 53f47fc88cdb2..31f4039bcfaca 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -546,23 +546,30 @@ PUT _cluster/settings ---- [discrete] +[[troubleshooting-max-docs-limit]] ==== Number of documents in the shard cannot exceed [2147483519] - -Elasticsearch shards reflect Lucene's underlying https://github.com/apache/lucene/issues/5176[index -`MAX_DOC` hard limit] of 2,147,483,519 (`(2^31)-129`) docs. This figure is -the sum of `docs.count` plus `docs.deleted` as reported by the <> -per shard. Exceeding this limit will result in errors like the following: +Each {es} shard is a separate Lucene index, so it shares Lucene's +https://github.com/apache/lucene/issues/5176[`MAX_DOC` limit] of having at most +2,147,483,519 (`(2^31)-129`) documents. This per-shard limit applies to the sum +of `docs.count` plus `docs.deleted` as reported by the <>. Exceeding this limit will result in errors like the following: [source,txt] ---- Elasticsearch exception [type=illegal_argument_exception, reason=Number of documents in the shard cannot exceed [2147483519]] ---- -TIP: This calculation may differ from the <> calculation, because the Count API does not include nested documents. +TIP: This calculation may differ from the <> +calculation, because the Count API does not include nested documents and does +not count deleted documents. +This limit is much higher than the <> of approximately 200M documents per shard. -Try using the <> to clear deleted docs. For example: +If you encounter this problem, try to mitigate it by using the +<> to merge away some deleted docs. For +example: [source,console] ---- @@ -570,10 +577,9 @@ POST my-index-000001/_forcemerge?only_expunge_deletes=true ---- // TEST[setup:my_index] -This will launch an asynchronous task which can be monitored via the <>. - -For a long-term solution try: +This will launch an asynchronous task which can be monitored via the +<>. 
-* <> -* aligning the index to recommendations on this page by either -<> or <> the index +It may also be helpful to <>, +or to <> or <> the index into +one with a larger number of shards. From ef93c95227a3f603b9dfc3264bd3637cc39dea91 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Tue, 16 Jul 2024 15:39:20 +0100 Subject: [PATCH 067/406] Add a cluster listener to fix missing cluster features after upgrade (#110710) (#110924) Non-master-eligible nodes that are already part of a cluster when the master is upgraded don't re-join the cluster, so their cluster features never get updated. This adds a cluster listener that spots this occurring, and manually gets the node's features with a new transport action and updates the cluster state after the fact. --- docs/changelog/110710.yaml | 6 + .../elasticsearch/action/ActionModule.java | 2 + .../cluster/node/features/NodeFeatures.java | 42 +++ .../node/features/NodesFeaturesRequest.java | 17 ++ .../node/features/NodesFeaturesResponse.java | 35 +++ .../TransportNodesFeaturesAction.java | 91 +++++++ .../elasticsearch/cluster/ClusterState.java | 5 + .../features/NodeFeaturesFixupListener.java | 217 ++++++++++++++++ .../elasticsearch/node/NodeConstruction.java | 2 + .../NodeFeaturesFixupListenerTests.java | 245 ++++++++++++++++++ .../xpack/security/operator/Constants.java | 1 + 11 files changed, 663 insertions(+) create mode 100644 docs/changelog/110710.yaml create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodeFeatures.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodesFeaturesRequest.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodesFeaturesResponse.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/TransportNodesFeaturesAction.java create mode 100644 server/src/main/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListener.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListenerTests.java diff --git a/docs/changelog/110710.yaml b/docs/changelog/110710.yaml new file mode 100644 index 0000000000000..bf3349ee25cdd --- /dev/null +++ b/docs/changelog/110710.yaml @@ -0,0 +1,6 @@ +pr: 110710 +summary: Add a cluster listener to fix missing node features after upgrading from a version prior to 8.13 +area: Infra/Core +type: bug +issues: + - 109254 diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index b550755ce7bdd..a9c6894355cb6 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction; import org.elasticsearch.action.admin.cluster.migration.TransportPostFeatureUpgradeAction; import org.elasticsearch.action.admin.cluster.node.capabilities.TransportNodesCapabilitiesAction; +import org.elasticsearch.action.admin.cluster.node.features.TransportNodesFeaturesAction; import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; @@ -621,6 +622,7 @@ public void reg actions.register(TransportNodesInfoAction.TYPE, 
TransportNodesInfoAction.class); actions.register(TransportRemoteInfoAction.TYPE, TransportRemoteInfoAction.class); actions.register(TransportNodesCapabilitiesAction.TYPE, TransportNodesCapabilitiesAction.class); + actions.register(TransportNodesFeaturesAction.TYPE, TransportNodesFeaturesAction.class); actions.register(RemoteClusterNodesAction.TYPE, RemoteClusterNodesAction.TransportAction.class); actions.register(TransportNodesStatsAction.TYPE, TransportNodesStatsAction.class); actions.register(TransportNodesUsageAction.TYPE, TransportNodesUsageAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodeFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodeFeatures.java new file mode 100644 index 0000000000000..b33520624d114 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodeFeatures.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.node.features; + +import org.elasticsearch.action.support.nodes.BaseNodeResponse; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Set; + +public class NodeFeatures extends BaseNodeResponse { + + private final Set features; + + public NodeFeatures(StreamInput in) throws IOException { + super(in); + features = in.readCollectionAsImmutableSet(StreamInput::readString); + } + + public NodeFeatures(Set features, DiscoveryNode node) { + super(node); + this.features = Set.copyOf(features); + } + + public Set nodeFeatures() { + return features; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeCollection(features, StreamOutput::writeString); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodesFeaturesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodesFeaturesRequest.java new file mode 100644 index 0000000000000..83b6fff7cf2b2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodesFeaturesRequest.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.node.features; + +import org.elasticsearch.action.support.nodes.BaseNodesRequest; + +public class NodesFeaturesRequest extends BaseNodesRequest { + public NodesFeaturesRequest(String... 
nodes) { + super(nodes); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodesFeaturesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodesFeaturesResponse.java new file mode 100644 index 0000000000000..0fca588216b15 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/NodesFeaturesResponse.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.node.features; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.action.support.nodes.BaseNodesResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.List; + +public class NodesFeaturesResponse extends BaseNodesResponse { + public NodesFeaturesResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); + } + + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return TransportAction.localOnly(); + } + + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + TransportAction.localOnly(); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/TransportNodesFeaturesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/TransportNodesFeaturesAction.java new file mode 100644 index 0000000000000..d1b7a4f1b7e95 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/TransportNodesFeaturesAction.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.node.features; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +@UpdateForV9 +// @UpdateForV10 // this can be removed in v10. It may be called by v8 nodes to v9 nodes. 
+public class TransportNodesFeaturesAction extends TransportNodesAction< + NodesFeaturesRequest, + NodesFeaturesResponse, + TransportNodesFeaturesAction.NodeFeaturesRequest, + NodeFeatures> { + + public static final ActionType TYPE = new ActionType<>("cluster:monitor/nodes/features"); + + private final FeatureService featureService; + + @Inject + public TransportNodesFeaturesAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + FeatureService featureService + ) { + super( + TYPE.name(), + clusterService, + transportService, + actionFilters, + NodeFeaturesRequest::new, + threadPool.executor(ThreadPool.Names.MANAGEMENT) + ); + this.featureService = featureService; + } + + @Override + protected NodesFeaturesResponse newResponse( + NodesFeaturesRequest request, + List responses, + List failures + ) { + return new NodesFeaturesResponse(clusterService.getClusterName(), responses, failures); + } + + @Override + protected NodeFeaturesRequest newNodeRequest(NodesFeaturesRequest request) { + return new NodeFeaturesRequest(); + } + + @Override + protected NodeFeatures newNodeResponse(StreamInput in, DiscoveryNode node) throws IOException { + return new NodeFeatures(in); + } + + @Override + protected NodeFeatures nodeOperation(NodeFeaturesRequest request, Task task) { + return new NodeFeatures(featureService.getNodeFeatures().keySet(), transportService.getLocalNode()); + } + + public static class NodeFeaturesRequest extends TransportRequest { + public NodeFeaturesRequest(StreamInput in) throws IOException { + super(in); + } + + public NodeFeaturesRequest() {} + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index f9294210e0a6a..c54269da68507 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -884,6 +884,11 @@ public Map> nodeFeatures() { return Collections.unmodifiableMap(this.nodeFeatures); } + public Builder putNodeFeatures(String node, Set features) { + this.nodeFeatures.put(node, features); + return this; + } + public Builder routingTable(RoutingTable.Builder routingTableBuilder) { return routingTable(routingTableBuilder.build()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListener.java new file mode 100644 index 0000000000000..c8b2555c0f15d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListener.java @@ -0,0 +1,217 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.features; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.admin.cluster.node.features.NodeFeatures; +import org.elasticsearch.action.admin.cluster.node.features.NodesFeaturesRequest; +import org.elasticsearch.action.admin.cluster.node.features.NodesFeaturesResponse; +import org.elasticsearch.action.admin.cluster.node.features.TransportNodesFeaturesAction; +import org.elasticsearch.client.internal.ClusterAdminClient; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterFeatures; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.MasterServiceTaskQueue; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Executor; +import java.util.stream.Collectors; + +@UpdateForV9 // this can be removed in v9 +public class NodeFeaturesFixupListener implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(NodeFeaturesFixupListener.class); + + private static final TimeValue RETRY_TIME = TimeValue.timeValueSeconds(30); + + private final MasterServiceTaskQueue taskQueue; + private final ClusterAdminClient client; + private final Scheduler scheduler; + private final Executor executor; + private final Set pendingNodes = Collections.synchronizedSet(new HashSet<>()); + + public NodeFeaturesFixupListener(ClusterService service, ClusterAdminClient client, ThreadPool threadPool) { + // there tends to be a lot of state operations on an upgrade - this one is not time-critical, + // so use LOW priority. It just needs to be run at some point after upgrade. 
+ this( + service.createTaskQueue("fix-node-features", Priority.LOW, new NodesFeaturesUpdater()), + client, + threadPool, + threadPool.executor(ThreadPool.Names.CLUSTER_COORDINATION) + ); + } + + NodeFeaturesFixupListener( + MasterServiceTaskQueue taskQueue, + ClusterAdminClient client, + Scheduler scheduler, + Executor executor + ) { + this.taskQueue = taskQueue; + this.client = client; + this.scheduler = scheduler; + this.executor = executor; + } + + class NodesFeaturesTask implements ClusterStateTaskListener { + private final Map> results; + private final int retryNum; + + NodesFeaturesTask(Map> results, int retryNum) { + this.results = results; + this.retryNum = retryNum; + } + + @Override + public void onFailure(Exception e) { + logger.error("Could not apply features for nodes {} to cluster state", results.keySet(), e); + scheduleRetry(results.keySet(), retryNum); + } + + public Map> results() { + return results; + } + } + + static class NodesFeaturesUpdater implements ClusterStateTaskExecutor { + @Override + public ClusterState execute(BatchExecutionContext context) { + ClusterState.Builder builder = ClusterState.builder(context.initialState()); + var existingFeatures = builder.nodeFeatures(); + + boolean modified = false; + for (var c : context.taskContexts()) { + for (var e : c.getTask().results().entrySet()) { + // double check there are still no features for the node + if (existingFeatures.getOrDefault(e.getKey(), Set.of()).isEmpty()) { + builder.putNodeFeatures(e.getKey(), e.getValue()); + modified = true; + } + } + c.success(() -> {}); + } + return modified ? builder.build() : context.initialState(); + } + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (event.nodesDelta().masterNodeChanged() && event.localNodeMaster()) { + /* + * Execute this if we have just become master. + * Check if there are any nodes that should have features in cluster state, but don't. + * This can happen if the master was upgraded from before 8.13, and one or more non-master nodes + * were already upgraded. They don't re-join the cluster with the new master, so never get their features + * (which the master now understands) added to cluster state. + * So we need to do a separate transport call to get the node features and add them to cluster state. + * We can't use features to determine when this should happen, as the features are incorrect. + * We also can't use transport version, as that is unreliable for upgrades + * from versions before 8.8 (see TransportVersionFixupListener). + * So the only thing we can use is release version. + * This is ok here, as Serverless will never hit this case, so the node feature fetch action will never be called on Serverless. + * This whole class will be removed in ES v9. 
+ */ + ClusterFeatures nodeFeatures = event.state().clusterFeatures(); + Set queryNodes = event.state() + .nodes() + .stream() + .filter(n -> n.getVersion().onOrAfter(Version.V_8_15_0)) + .map(DiscoveryNode::getId) + .filter(n -> getNodeFeatures(nodeFeatures, n).isEmpty()) + .collect(Collectors.toSet()); + + if (queryNodes.isEmpty() == false) { + logger.debug("Fetching actual node features for nodes {}", queryNodes); + queryNodesFeatures(queryNodes, 0); + } + } + } + + @SuppressForbidden(reason = "Need to access a specific node's features") + private static Set getNodeFeatures(ClusterFeatures features, String nodeId) { + return features.nodeFeatures().getOrDefault(nodeId, Set.of()); + } + + private void scheduleRetry(Set nodes, int thisRetryNum) { + // just keep retrying until this succeeds + logger.debug("Scheduling retry {} for nodes {}", thisRetryNum + 1, nodes); + scheduler.schedule(() -> queryNodesFeatures(nodes, thisRetryNum + 1), RETRY_TIME, executor); + } + + private void queryNodesFeatures(Set nodes, int retryNum) { + // some might already be in-progress + Set outstandingNodes = Sets.newHashSetWithExpectedSize(nodes.size()); + synchronized (pendingNodes) { + for (String n : nodes) { + if (pendingNodes.add(n)) { + outstandingNodes.add(n); + } + } + } + if (outstandingNodes.isEmpty()) { + // all nodes already have in-progress requests + return; + } + + NodesFeaturesRequest request = new NodesFeaturesRequest(outstandingNodes.toArray(String[]::new)); + client.execute(TransportNodesFeaturesAction.TYPE, request, new ActionListener<>() { + @Override + public void onResponse(NodesFeaturesResponse response) { + pendingNodes.removeAll(outstandingNodes); + handleResponse(response, retryNum); + } + + @Override + public void onFailure(Exception e) { + pendingNodes.removeAll(outstandingNodes); + logger.warn("Could not read features for nodes {}", outstandingNodes, e); + scheduleRetry(outstandingNodes, retryNum); + } + }); + } + + private void handleResponse(NodesFeaturesResponse response, int retryNum) { + if (response.hasFailures()) { + Set failedNodes = new HashSet<>(); + for (FailedNodeException fne : response.failures()) { + logger.warn("Failed to read features from node {}", fne.nodeId(), fne); + failedNodes.add(fne.nodeId()); + } + scheduleRetry(failedNodes, retryNum); + } + // carry on and read what we can + + Map> results = response.getNodes() + .stream() + .collect(Collectors.toUnmodifiableMap(n -> n.getNode().getId(), NodeFeatures::nodeFeatures)); + + if (results.isEmpty() == false) { + taskQueue.submitTask("fix-node-features", new NodesFeaturesTask(results, retryNum), null); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index aa0f9b8552d22..83e23d9a60b58 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -40,6 +40,7 @@ import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.MasterHistoryService; import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; +import org.elasticsearch.cluster.features.NodeFeaturesFixupListener; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; @@ -752,6 +753,7 @@ private void construct( clusterService.addListener( 
new TransportVersionsFixupListener(clusterService, client.admin().cluster(), featureService, threadPool) ); + clusterService.addListener(new NodeFeaturesFixupListener(clusterService, client.admin().cluster(), threadPool)); } SourceFieldMetrics sourceFieldMetrics = new SourceFieldMetrics( diff --git a/server/src/test/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListenerTests.java new file mode 100644 index 0000000000000..30d4c85e8fb67 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListenerTests.java @@ -0,0 +1,245 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.features; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.features.NodeFeatures; +import org.elasticsearch.action.admin.cluster.node.features.NodesFeaturesRequest; +import org.elasticsearch.action.admin.cluster.node.features.NodesFeaturesResponse; +import org.elasticsearch.action.admin.cluster.node.features.TransportNodesFeaturesAction; +import org.elasticsearch.client.internal.ClusterAdminClient; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.features.NodeFeaturesFixupListener.NodesFeaturesTask; +import org.elasticsearch.cluster.features.NodeFeaturesFixupListener.NodesFeaturesUpdater; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; +import org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils; +import org.elasticsearch.cluster.service.MasterServiceTaskQueue; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.Scheduler; +import org.mockito.ArgumentCaptor; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Executor; + +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.hamcrest.MockitoHamcrest.argThat; + +public class NodeFeaturesFixupListenerTests extends ESTestCase { + + @SuppressWarnings("unchecked") + private static MasterServiceTaskQueue newMockTaskQueue() { + return mock(MasterServiceTaskQueue.class); + } + + private static DiscoveryNodes nodes(Version... 
versions) { + var builder = DiscoveryNodes.builder(); + for (int i = 0; i < versions.length; i++) { + builder.add(DiscoveryNodeUtils.create("node" + i, new TransportAddress(TransportAddress.META_ADDRESS, 9200 + i), versions[i])); + } + builder.localNodeId("node0").masterNodeId("node0"); + return builder.build(); + } + + private static DiscoveryNodes nodes(VersionInformation... versions) { + var builder = DiscoveryNodes.builder(); + for (int i = 0; i < versions.length; i++) { + builder.add( + DiscoveryNodeUtils.builder("node" + i) + .address(new TransportAddress(TransportAddress.META_ADDRESS, 9200 + i)) + .version(versions[i]) + .build() + ); + } + builder.localNodeId("node0").masterNodeId("node0"); + return builder.build(); + } + + @SafeVarargs + private static Map> features(Set... nodeFeatures) { + Map> features = new HashMap<>(); + for (int i = 0; i < nodeFeatures.length; i++) { + features.put("node" + i, nodeFeatures[i]); + } + return features; + } + + private static NodesFeaturesResponse getResponse(Map> responseData) { + return new NodesFeaturesResponse( + ClusterName.DEFAULT, + responseData.entrySet() + .stream() + .map( + e -> new NodeFeatures( + e.getValue(), + DiscoveryNodeUtils.create(e.getKey(), new TransportAddress(TransportAddress.META_ADDRESS, 9200)) + ) + ) + .toList(), + List.of() + ); + } + + public void testNothingDoneWhenNothingToFix() { + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + ClusterAdminClient client = mock(ClusterAdminClient.class); + + ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes(nodes(Version.CURRENT, Version.CURRENT)) + .nodeFeatures(features(Set.of("f1", "f2"), Set.of("f1", "f2"))) + .build(); + + NodeFeaturesFixupListener listener = new NodeFeaturesFixupListener(taskQueue, client, null, null); + listener.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); + + verify(taskQueue, never()).submitTask(anyString(), any(), any()); + } + + public void testFeaturesFixedAfterNewMaster() throws Exception { + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + ClusterAdminClient client = mock(ClusterAdminClient.class); + Set features = Set.of("f1", "f2"); + + ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes(nodes(Version.CURRENT, Version.CURRENT, Version.CURRENT)) + .nodeFeatures(features(features, Set.of(), Set.of())) + .build(); + + ArgumentCaptor> action = ArgumentCaptor.captor(); + ArgumentCaptor task = ArgumentCaptor.captor(); + + NodeFeaturesFixupListener listener = new NodeFeaturesFixupListener(taskQueue, client, null, null); + listener.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); + verify(client).execute( + eq(TransportNodesFeaturesAction.TYPE), + argThat(transformedMatch(NodesFeaturesRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), + action.capture() + ); + + action.getValue().onResponse(getResponse(Map.of("node1", features, "node2", features))); + verify(taskQueue).submitTask(anyString(), task.capture(), any()); + + ClusterState newState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + testState, + new NodesFeaturesUpdater(), + List.of(task.getValue()) + ); + + assertThat(newState.clusterFeatures().allNodeFeatures(), containsInAnyOrder("f1", "f2")); + } + + public void testFeaturesFetchedOnlyForUpdatedNodes() { + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + ClusterAdminClient client = mock(ClusterAdminClient.class); + + ClusterState testState = 
ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes( + nodes( + VersionInformation.CURRENT, + VersionInformation.CURRENT, + new VersionInformation(Version.V_8_12_0, IndexVersion.current(), IndexVersion.current()) + ) + ) + .nodeFeatures(features(Set.of("f1", "f2"), Set.of(), Set.of())) + .build(); + + ArgumentCaptor> action = ArgumentCaptor.captor(); + + NodeFeaturesFixupListener listener = new NodeFeaturesFixupListener(taskQueue, client, null, null); + listener.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); + verify(client).execute( + eq(TransportNodesFeaturesAction.TYPE), + argThat(transformedMatch(NodesFeaturesRequest::nodesIds, arrayContainingInAnyOrder("node1"))), + action.capture() + ); + } + + public void testConcurrentChangesDoNotOverlap() { + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + ClusterAdminClient client = mock(ClusterAdminClient.class); + Set features = Set.of("f1", "f2"); + + ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes(nodes(Version.CURRENT, Version.CURRENT, Version.CURRENT)) + .nodeFeatures(features(features, Set.of(), Set.of())) + .build(); + + NodeFeaturesFixupListener listeners = new NodeFeaturesFixupListener(taskQueue, client, null, null); + listeners.clusterChanged(new ClusterChangedEvent("test", testState1, ClusterState.EMPTY_STATE)); + verify(client).execute( + eq(TransportNodesFeaturesAction.TYPE), + argThat(transformedMatch(NodesFeaturesRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), + any() + ); + // don't send back the response yet + + ClusterState testState2 = ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes(nodes(Version.CURRENT, Version.CURRENT, Version.CURRENT)) + .nodeFeatures(features(features, features, Set.of())) + .build(); + // should not send any requests + listeners.clusterChanged(new ClusterChangedEvent("test", testState2, testState1)); + verifyNoMoreInteractions(client); + } + + public void testFailedRequestsAreRetried() { + MasterServiceTaskQueue taskQueue = newMockTaskQueue(); + ClusterAdminClient client = mock(ClusterAdminClient.class); + Scheduler scheduler = mock(Scheduler.class); + Executor executor = mock(Executor.class); + Set features = Set.of("f1", "f2"); + + ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes(nodes(Version.CURRENT, Version.CURRENT, Version.CURRENT)) + .nodeFeatures(features(features, Set.of(), Set.of())) + .build(); + + ArgumentCaptor> action = ArgumentCaptor.captor(); + ArgumentCaptor retry = ArgumentCaptor.forClass(Runnable.class); + + NodeFeaturesFixupListener listener = new NodeFeaturesFixupListener(taskQueue, client, scheduler, executor); + listener.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); + verify(client).execute( + eq(TransportNodesFeaturesAction.TYPE), + argThat(transformedMatch(NodesFeaturesRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), + action.capture() + ); + + action.getValue().onFailure(new RuntimeException("failure")); + verify(scheduler).schedule(retry.capture(), any(), same(executor)); + + // running the retry should cause another call + retry.getValue().run(); + verify(client, times(2)).execute( + eq(TransportNodesFeaturesAction.TYPE), + argThat(transformedMatch(NodesFeaturesRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), + action.capture() + ); + } +} diff --git 
a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index ffa4d1082c7e6..c04d531ce3fdb 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -353,6 +353,7 @@ public class Constants { "cluster:monitor/main", "cluster:monitor/nodes/capabilities", "cluster:monitor/nodes/data_tier_usage", + "cluster:monitor/nodes/features", "cluster:monitor/nodes/hot_threads", "cluster:monitor/nodes/info", "cluster:monitor/nodes/stats", From 1351cb48fb10c6743e49e8b842235db0c6e7389a Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 16 Jul 2024 18:35:13 +0200 Subject: [PATCH 068/406] Tweak the logs index mode changelog. (#110929) --- docs/changelog/108896.yaml | 16 +++++++++++++++- docs/changelog/109025.yaml | 4 ++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/docs/changelog/108896.yaml b/docs/changelog/108896.yaml index c52f074b65605..a7821c7670a25 100644 --- a/docs/changelog/108896.yaml +++ b/docs/changelog/108896.yaml @@ -1,6 +1,20 @@ pr: 108896 -summary: Introduce `logs` index mode as Tech Preview +summary: Introduce logs index mode as Tech Preview area: Logs type: feature issues: - 108896 +highlight: + title: Introduce logs index mode as Tech Preview + body: |- + This change introduces a new index mode named `logs`. + When the new index mode is enabled then the following storage savings features are enabled automatically: + * Synthetic source, which omits storing the _source. When _source or part of it is requested it is synthesized on the fly at runtime. + * Index sorting. By default indices are sorted by `host.name` and `@timestamp` fields at index time. This can be overwritten if other sorting fields yield better compression rate. + * Enable more space efficient compression for fields with doc values enabled. These are the same codecs used + when `time_series` index mode is enabled. + + The `index.mode` index setting set to `logs` should be configured in index templates or defined when creating a plain index. + Benchmarks and other tests have shown that logs data sets use around 2.5 times less storage with the new index mode enabled compared to not configuring it. + The new `logs` index mode is a tech preview feature. + notable: true diff --git a/docs/changelog/109025.yaml b/docs/changelog/109025.yaml index 38d19cab13d30..77a3a1180bc93 100644 --- a/docs/changelog/109025.yaml +++ b/docs/changelog/109025.yaml @@ -1,6 +1,6 @@ pr: 109025 -summary: Introduce a setting controlling the activation of the `logs` index mode in logs@settings +summary: Introduce a node setting controlling the activation of the `logs` index mode in logs@settings component template area: Logs -type: feature +type: enhancement issues: - 108762 From 47cdc55dce79b0b284727be118baa07bcfd274c4 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 16 Jul 2024 19:32:53 +0200 Subject: [PATCH 069/406] Fix leak in collapsing search results (#110927) (#110939) Fixing this case for now by enforcing unpooled to plug the leak, this needs a little more work to function well pooled. 
--- docs/changelog/110927.yaml | 5 ++++ .../search/CollapseSearchResultsIT.java | 23 +++++++++++++++++++ .../action/search/ExpandSearchPhase.java | 4 ++++ 3 files changed, 32 insertions(+) create mode 100644 docs/changelog/110927.yaml diff --git a/docs/changelog/110927.yaml b/docs/changelog/110927.yaml new file mode 100644 index 0000000000000..3602ce3e811fa --- /dev/null +++ b/docs/changelog/110927.yaml @@ -0,0 +1,5 @@ +pr: 110927 +summary: Fix leak in collapsing search results +area: Search +type: bug +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java index f5fdd752a6f57..aa721122c2160 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java @@ -61,4 +61,27 @@ public void testCollapseWithDocValueFields() { } ); } + + public void testCollapseWithFields() { + final String indexName = "test_collapse"; + createIndex(indexName); + final String collapseField = "collapse_field"; + final String otherField = "other_field"; + assertAcked(indicesAdmin().preparePutMapping(indexName).setSource(collapseField, "type=keyword", otherField, "type=keyword")); + index(indexName, "id_1_0", Map.of(collapseField, "value1", otherField, "other_value1")); + index(indexName, "id_1_1", Map.of(collapseField, "value1", otherField, "other_value2")); + index(indexName, "id_2_0", Map.of(collapseField, "value2", otherField, "other_value3")); + refresh(indexName); + + assertNoFailuresAndResponse( + prepareSearch(indexName).setQuery(new MatchAllQueryBuilder()) + .setFetchSource(false) + .addFetchField(otherField) + .setCollapse(new CollapseBuilder(collapseField).setInnerHits(new InnerHitBuilder("ih").setSize(2))), + searchResponse -> { + assertEquals(collapseField, searchResponse.getHits().getCollapseField()); + assertEquals(Set.of(new BytesRef("value1"), new BytesRef("value2")), Set.of(searchResponse.getHits().getCollapseValues())); + } + ); + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index e8470ba77632f..e2385745149c1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -100,6 +100,10 @@ private void doRun() { if (hit.getInnerHits() == null) { hit.setInnerHits(Maps.newMapWithExpectedSize(innerHitBuilders.size())); } + if (hit.isPooled() == false) { + // TODO: make this work pooled by forcing the hit itself to become pooled as needed here + innerHits = innerHits.asUnpooled(); + } hit.getInnerHits().put(innerHitBuilder.getName(), innerHits); assert innerHits.isPooled() == false || hit.isPooled() : "pooled inner hits can only be added to a pooled hit"; innerHits.mustIncRef(); From 107eaa7b47c1e1f3815016f0576ee7446cb8418b Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 17 Jul 2024 06:51:06 +1000 Subject: [PATCH 070/406] Mute org.elasticsearch.preallocate.PreallocateTests testPreallocate #110948 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 3653617bd2ce6..ffe2e078938d9 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -81,6 +81,9 @@ tests: method: 
"testCreateAndRestorePartialSearchableSnapshot" - class: "org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT" issue: "https://github.com/elastic/elasticsearch/issues/110591" +- class: org.elasticsearch.preallocate.PreallocateTests + method: testPreallocate + issue: https://github.com/elastic/elasticsearch/issues/110948 # Examples: # From 75fbb7d16de5f231c730fcbf6ad91772da32a633 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Wed, 17 Jul 2024 08:12:43 +0100 Subject: [PATCH 071/406] Remove typo put-lifecycle.asciidoc (#110875) (#110919) --- .../data-streams/lifecycle/apis/put-lifecycle.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc index 6bd157071f54e..7d33a5b5f880c 100644 --- a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc @@ -54,7 +54,7 @@ duration the document could be deleted. When empty, every document in this data `enabled`:: (Optional, boolean) -If defined, it turns data streqm lifecycle on/off (`true`/`false`) for this data stream. +If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (`enabled: false`) will have no effect on the data stream. Defaults to `true`. From 1284a3580a08ce0c71097cc407494a5ffed547c7 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Wed, 17 Jul 2024 14:33:42 +0200 Subject: [PATCH 072/406] [8.15] ESQL: Validate unique plan attribute names (#110488) (#110966) * ESQL: Validate unique plan attribute names (#110488) * Enforce an invariant in our dependency checker so that logical plans never have duplicate output attribute names or ids. * Fix ROW to not produce columns with duplicate names. * Fix ResolveUnionTypes to not create multiple synthetic field attributes for the same union type. * Add tests for commands using the same column name more than once. * Update docs w.r.t. how commands behave if they are used with duplicate column names. (cherry picked from commit da5392134fcb806198921b0b25620141ce6a4798) # Conflicts: # x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec # x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java # x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java # x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java * Remove unrelated csv tests These slipped in via merge conflicts. 
--- docs/changelog/110488.yaml | 6 ++ .../esql/processing-commands/dissect.asciidoc | 2 + .../esql/processing-commands/enrich.asciidoc | 7 +- .../esql/processing-commands/eval.asciidoc | 4 +- .../esql/processing-commands/grok.asciidoc | 15 ++++ .../esql/processing-commands/keep.asciidoc | 4 +- .../esql/processing-commands/lookup.asciidoc | 1 + .../esql/processing-commands/rename.asciidoc | 4 +- .../esql/processing-commands/stats.asciidoc | 3 + .../esql/source-commands/row.asciidoc | 1 + .../esql/core/analyzer/AnalyzerRules.java | 12 +--- .../xpack/esql/CsvTestsDataLoader.java | 5 +- .../src/main/resources/addresses.csv | 4 ++ .../src/main/resources/dissect.csv-spec | 25 +++++++ .../src/main/resources/docs.csv-spec | 17 +++++ .../src/main/resources/drop.csv-spec | 50 ++++++++++++++ .../src/main/resources/enrich.csv-spec | 68 +++++++++++++++++++ .../src/main/resources/eval.csv-spec | 23 +++++++ .../src/main/resources/grok.csv-spec | 25 +++++++ .../src/main/resources/keep.csv-spec | 60 ++++++++++++++++ .../src/main/resources/mapping-addresses.json | 44 ++++++++++++ .../src/main/resources/rename.csv-spec | 39 +++++++++++ .../src/main/resources/row.csv-spec | 23 ++++++- .../src/main/resources/stats.csv-spec | 33 +++++++++ .../xpack/esql/action/EsqlCapabilities.java | 8 ++- .../xpack/esql/analysis/Analyzer.java | 24 +++++-- .../xpack/esql/optimizer/OptimizerRules.java | 22 +++++- .../xpack/esql/parser/LogicalPlanBuilder.java | 4 +- .../xpack/esql/plan/logical/Rename.java | 7 ++ 29 files changed, 514 insertions(+), 26 deletions(-) create mode 100644 docs/changelog/110488.yaml create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/addresses.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-addresses.json diff --git a/docs/changelog/110488.yaml b/docs/changelog/110488.yaml new file mode 100644 index 0000000000000..fbb439f20fc96 --- /dev/null +++ b/docs/changelog/110488.yaml @@ -0,0 +1,6 @@ +pr: 110488 +summary: "ESQL: Validate unique plan attribute names" +area: ES|QL +type: bug +issues: + - 110541 diff --git a/docs/reference/esql/processing-commands/dissect.asciidoc b/docs/reference/esql/processing-commands/dissect.asciidoc index c48b72af0de7e..3dca50c8aee5e 100644 --- a/docs/reference/esql/processing-commands/dissect.asciidoc +++ b/docs/reference/esql/processing-commands/dissect.asciidoc @@ -17,6 +17,8 @@ multiple values, `DISSECT` will process each value. `pattern`:: A <>. +If a field name conflicts with an existing column, the existing column is dropped. +If a field name is used more than once, only the rightmost duplicate creates a column. ``:: A string used as the separator between appended values, when using the <>. diff --git a/docs/reference/esql/processing-commands/enrich.asciidoc b/docs/reference/esql/processing-commands/enrich.asciidoc index 5470d81b2f40b..844cc2c62d1ed 100644 --- a/docs/reference/esql/processing-commands/enrich.asciidoc +++ b/docs/reference/esql/processing-commands/enrich.asciidoc @@ -28,11 +28,16 @@ name as the `match_field` defined in the <>. The enrich fields from the enrich index that are added to the result as new columns. If a column with the same name as the enrich field already exists, the existing column will be replaced by the new column. If not specified, each of -the enrich fields defined in the policy is added +the enrich fields defined in the policy is added. +A column with the same name as the enrich field will be dropped unless the +enrich field is renamed. 
`new_nameX`:: Enables you to change the name of the column that's added for each of the enrich fields. Defaults to the enrich field name. +If a column has the same name as the new name, it will be discarded. +If a name (new or original) occurs more than once, only the rightmost duplicate +creates a new column. *Description* diff --git a/docs/reference/esql/processing-commands/eval.asciidoc b/docs/reference/esql/processing-commands/eval.asciidoc index 9b34fca7ceeff..be69d775b2755 100644 --- a/docs/reference/esql/processing-commands/eval.asciidoc +++ b/docs/reference/esql/processing-commands/eval.asciidoc @@ -13,10 +13,12 @@ EVAL [column1 =] value1[, ..., [columnN =] valueN] `columnX`:: The column name. +If a column with the same name already exists, the existing column is dropped. +If a column name is used more than once, only the rightmost duplicate creates a column. `valueX`:: The value for the column. Can be a literal, an expression, or a -<>. +<>. Can use columns defined left of this one. *Description* diff --git a/docs/reference/esql/processing-commands/grok.asciidoc b/docs/reference/esql/processing-commands/grok.asciidoc index d5d58a9eaee12..58493a13359d2 100644 --- a/docs/reference/esql/processing-commands/grok.asciidoc +++ b/docs/reference/esql/processing-commands/grok.asciidoc @@ -17,6 +17,9 @@ multiple values, `GROK` will process each value. `pattern`:: A grok pattern. +If a field name conflicts with an existing column, the existing column is discarded. +If a field name is used more than once, a multi-valued column will be created with one value +per each occurrence of the field name. *Description* @@ -64,4 +67,16 @@ include::{esql-specs}/docs.csv-spec[tag=grokWithToDatetime] |=== include::{esql-specs}/docs.csv-spec[tag=grokWithToDatetime-result] |=== + +If a field name is used more than once, `GROK` creates a multi-valued +column: + +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=grokWithDuplicateFieldNames] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=grokWithDuplicateFieldNames-result] +|=== // end::examples[] diff --git a/docs/reference/esql/processing-commands/keep.asciidoc b/docs/reference/esql/processing-commands/keep.asciidoc index 57f32a68aec4c..a07afa64a756c 100644 --- a/docs/reference/esql/processing-commands/keep.asciidoc +++ b/docs/reference/esql/processing-commands/keep.asciidoc @@ -13,6 +13,8 @@ KEEP columns `columns`:: A comma-separated list of columns to keep. Supports wildcards. +See below for the behavior in case an existing column matches multiple +given wildcards or column names. *Description* @@ -26,7 +28,7 @@ Fields are added in the order they appear. If one field matches multiple express 2. Partial wildcard expressions (for example: `fieldNam*`) 3. Wildcard only (`*`) -If a field matches two expressions with the same precedence, the right-most expression wins. +If a field matches two expressions with the same precedence, the rightmost expression wins. Refer to the examples for illustrations of these precedence rules. diff --git a/docs/reference/esql/processing-commands/lookup.asciidoc b/docs/reference/esql/processing-commands/lookup.asciidoc index 1944d243968a8..142bcb93dc445 100644 --- a/docs/reference/esql/processing-commands/lookup.asciidoc +++ b/docs/reference/esql/processing-commands/lookup.asciidoc @@ -15,6 +15,7 @@ LOOKUP table ON match_field1[, match_field2, ...] `table`:: The name of the `table` provided in the request to match. 
+If the table's column names conflict with existing columns, the existing columns will be dropped. `match_field`:: The fields in the input to match against the table. diff --git a/docs/reference/esql/processing-commands/rename.asciidoc b/docs/reference/esql/processing-commands/rename.asciidoc index 773fe8b640f75..0f338ed6e15e8 100644 --- a/docs/reference/esql/processing-commands/rename.asciidoc +++ b/docs/reference/esql/processing-commands/rename.asciidoc @@ -15,7 +15,9 @@ RENAME old_name1 AS new_name1[, ..., old_nameN AS new_nameN] The name of a column you want to rename. `new_nameX`:: -The new name of the column. +The new name of the column. If it conflicts with an existing column name, +the existing column is dropped. If multiple columns are renamed to the same +name, all but the rightmost column with the same new name are dropped. *Description* diff --git a/docs/reference/esql/processing-commands/stats.asciidoc b/docs/reference/esql/processing-commands/stats.asciidoc index fe84c56bbfc19..db533866a0b1b 100644 --- a/docs/reference/esql/processing-commands/stats.asciidoc +++ b/docs/reference/esql/processing-commands/stats.asciidoc @@ -15,12 +15,15 @@ STATS [column1 =] expression1[, ..., [columnN =] expressionN] `columnX`:: The name by which the aggregated value is returned. If omitted, the name is equal to the corresponding expression (`expressionX`). +If multiple columns have the same name, all but the rightmost column with this +name will be ignored. `expressionX`:: An expression that computes an aggregated value. `grouping_expressionX`:: An expression that outputs the values to group by. +If its name coincides with one of the computed columns, that column will be ignored. NOTE: Individual `null` values are skipped when computing aggregations. diff --git a/docs/reference/esql/source-commands/row.asciidoc b/docs/reference/esql/source-commands/row.asciidoc index adce844f365b8..d127080415f37 100644 --- a/docs/reference/esql/source-commands/row.asciidoc +++ b/docs/reference/esql/source-commands/row.asciidoc @@ -13,6 +13,7 @@ ROW column1 = value1[, ..., columnN = valueN] `columnX`:: The column name. +In case of duplicate column names, only the rightmost duplicate creates a column. `valueX`:: The value for the column. 
Can be a literal, an expression, or a diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/analyzer/AnalyzerRules.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/analyzer/AnalyzerRules.java index ce188511fe7bc..83afb2ac38a4e 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/analyzer/AnalyzerRules.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/analyzer/AnalyzerRules.java @@ -20,8 +20,6 @@ import java.util.function.Predicate; import java.util.function.Supplier; -import static java.util.Collections.singletonList; - public final class AnalyzerRules { public abstract static class AnalyzerRule extends Rule { @@ -138,14 +136,6 @@ public static List maybeResolveAgainstList( ) .toList(); - return singletonList( - ua.withUnresolvedMessage( - "Reference [" - + ua.qualifiedName() - + "] is ambiguous (to disambiguate use quotes or qualifiers); " - + "matches any of " - + refs - ) - ); + throw new IllegalStateException("Reference [" + ua.qualifiedName() + "] is ambiguous; " + "matches any of " + refs); } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 530b2bc01b3d6..f9b768d67d574 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -96,8 +96,8 @@ public class CsvTestsDataLoader { "cartesian_multipolygons.csv" ); private static final TestsDataset DISTANCES = new TestsDataset("distances", "mapping-distances.json", "distances.csv"); - private static final TestsDataset K8S = new TestsDataset("k8s", "k8s-mappings.json", "k8s.csv", "k8s-settings.json", true); + private static final TestsDataset ADDRESSES = new TestsDataset("addresses", "mapping-addresses.json", "addresses.csv", null, true); public static final Map CSV_DATASET_MAP = Map.ofEntries( Map.entry(EMPLOYEES.indexName, EMPLOYEES), @@ -121,7 +121,8 @@ public class CsvTestsDataLoader { Map.entry(AIRPORT_CITY_BOUNDARIES.indexName, AIRPORT_CITY_BOUNDARIES), Map.entry(CARTESIAN_MULTIPOLYGONS.indexName, CARTESIAN_MULTIPOLYGONS), Map.entry(K8S.indexName, K8S), - Map.entry(DISTANCES.indexName, DISTANCES) + Map.entry(DISTANCES.indexName, DISTANCES), + Map.entry(ADDRESSES.indexName, ADDRESSES) ); private static final EnrichConfig LANGUAGES_ENRICH = new EnrichConfig("languages_policy", "enrich-policy-languages.json"); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/addresses.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/addresses.csv new file mode 100644 index 0000000000000..0eea102400d60 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/addresses.csv @@ -0,0 +1,4 @@ +street:keyword,number:keyword,zip_code:keyword,city.name:keyword,city.country.name:keyword,city.country.continent.name:keyword,city.country.continent.planet.name:keyword,city.country.continent.planet.galaxy:keyword +Keizersgracht,281,1016 ED,Amsterdam,Netherlands,Europe,Earth,Milky Way +Kearny St,88,CA 94108,San Francisco,United States of America,North America,Earth,Milky Way +Marunouchi,2-7-2,100-7014,Tokyo,Japan,Asia,Earth,Milky Way diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec index 812198c324217..8c4e797b7982d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec @@ -26,6 +26,19 @@ first_name:keyword | left:keyword | full_name:keyword | right:keyword | last_nam Georgi | left | Georgi Facello | right | Facello ; +shadowingSubfields +FROM addresses +| KEEP city.country.continent.planet.name, city.country.name, city.name +| DISSECT city.name "%{city.country.continent.planet.name} %{?}" +| SORT city.name +; + +city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:keyword +Netherlands | Amsterdam | null +United States of America | San Francisco | San +Japan | Tokyo | null +; + shadowingSelf FROM employees | KEEP first_name, last_name @@ -50,6 +63,18 @@ last_name:keyword | left:keyword | foo:keyword | middle:keyword | ri Facello | left | Georgi1 Georgi2 Facello | middle | right | Georgi1 | Georgi2 | Facello ; +shadowingInternal +FROM employees +| KEEP first_name, last_name +| WHERE last_name == "Facello" +| EVAL name = concat(first_name, "1 ", last_name) +| DISSECT name "%{foo} %{foo}" +; + +first_name:keyword | last_name:keyword | name:keyword | foo:keyword +Georgi | Facello | Georgi1 Facello | Facello +; + complexPattern ROW a = "1953-01-23T12:15:00Z - some text - 127.0.0.1;" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index d34620a9e118d..15fe6853ae491 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -436,6 +436,23 @@ ROW a = "1.2.3.4 [2023-01-23T12:15:00.000Z] Connected" // end::grokWithEscape-result[] ; +grokWithDuplicateFieldNames +// tag::grokWithDuplicateFieldNames[] +FROM addresses +| KEEP city.name, zip_code +| GROK zip_code "%{WORD:zip_parts} %{WORD:zip_parts}" +// end::grokWithDuplicateFieldNames[] +| SORT city.name +; + +// tag::grokWithDuplicateFieldNames-result[] +city.name:keyword | zip_code:keyword | zip_parts:keyword +Amsterdam | 1016 ED | ["1016", "ED"] +San Francisco | CA 94108 | ["CA", "94108"] +Tokyo | 100-7014 | null +// end::grokWithDuplicateFieldNames-result[] +; + basicDissect // tag::basicDissect[] ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec index 35530cf6fdb8e..9886d6cce0ca2 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec @@ -122,3 +122,53 @@ FROM employees | STATS COUNT(*), MIN(salary * 10), MAX(languages)| DROP `COUNT( MIN(salary * 10):i | MAX(languages):i 253240 | 5 ; + +// Not really shadowing, but let's keep the name consistent with the other command's tests +shadowingInternal +FROM employees +| SORT emp_no ASC +| KEEP emp_no, first_name, last_name +| DROP last_name, last_name +| LIMIT 2 +; + +emp_no:integer | first_name:keyword + 10001 | Georgi + 10002 | Bezalel +; + +shadowingInternalWildcard +FROM employees +| SORT emp_no ASC +| KEEP emp_no, first_name, last_name +| DROP last*name, last*name, last*, last_name +| LIMIT 2 +; + +emp_no:integer | first_name:keyword + 10001 | Georgi + 10002 | Bezalel +; + +subfields +FROM addresses +| DROP 
city.country.continent.planet.name, city.country.continent.name, city.country.name, number, street, zip_code, city.country.continent.planet.name +| SORT city.name +; + +city.country.continent.planet.galaxy:keyword | city.name:keyword +Milky Way | Amsterdam +Milky Way | San Francisco +Milky Way | Tokyo +; + +subfieldsWildcard +FROM addresses +| DROP *.name, number, street, zip_code, *ame +; + +city.country.continent.planet.galaxy:keyword +Milky Way +Milky Way +Milky Way +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec index fc8c48afdf8cc..cf32e028b23bc 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec @@ -69,6 +69,34 @@ ROW left = "left", foo = "foo", client_ip = "172.21.0.5", env = "env", right = " left:keyword | client_ip:keyword | env:keyword | right:keyword | foo:keyword ; +shadowingSubfields +required_capability: enrich_load +FROM addresses +| KEEP city.country.continent.planet.name, city.country.name, city.name +| EVAL city.name = REPLACE(city.name, "San Francisco", "South San Francisco") +| ENRICH city_names ON city.name WITH city.country.continent.planet.name = airport +| SORT city.name +; + +city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:text +Netherlands | Amsterdam | null +United States of America | South San Francisco | San Francisco Int'l +Japan | Tokyo | null +; + +shadowingSubfieldsLimit0 +required_capability: enrich_load +FROM addresses +| KEEP city.country.continent.planet.name, city.country.name, city.name +| EVAL city.name = REPLACE(city.name, "San Francisco", "South San Francisco") +| ENRICH city_names ON city.name WITH city.country.continent.planet.name = airport +| SORT city.name +| LIMIT 0 +; + +city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:text +; + shadowingSelf required_capability: enrich_load ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" @@ -107,6 +135,46 @@ ROW left = "left", airport = "Zurich Airport ZRH", city = "Zürich", middle = "m left:keyword | city:keyword | middle:keyword | right:keyword | airport:text | region:text | city_boundary:geo_shape ; +shadowingInternal +required_capability: enrich_load +ROW city = "Zürich" +| ENRICH city_names ON city WITH x = airport, x = region +; + +city:keyword | x:text +Zürich | Bezirk Zürich +; + +shadowingInternalImplicit +required_capability: enrich_load +ROW city = "Zürich" +| ENRICH city_names ON city WITH airport = region +; + +city:keyword | airport:text +Zürich | Bezirk Zürich +; + +shadowingInternalImplicit2 +required_capability: enrich_load +ROW city = "Zürich" +| ENRICH city_names ON city WITH airport, airport = region +; + +city:keyword | airport:text +Zürich | Bezirk Zürich +; + +shadowingInternalImplicit3 +required_capability: enrich_load +ROW city = "Zürich" +| ENRICH city_names ON city WITH airport = region, airport +; + +city:keyword | airport:text +Zürich | Zurich Int'l +; + simple required_capability: enrich_load diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index 3df3b85e5e3af..87f54fbf0f174 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -15,6 +15,19 @@ left:keyword | 
right:keyword | x:integer left | right | 1 ; +shadowingSubfields +FROM addresses +| KEEP city.country.continent.planet.name, city.country.name, city.name +| EVAL city.country.continent.planet.name = to_upper(city.country.continent.planet.name) +| SORT city.name +; + +city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:keyword +Netherlands | Amsterdam | EARTH +United States of America | San Francisco | EARTH +Japan | Tokyo | EARTH +; + shadowingSelf ROW left = "left", x = 10000 , right = "right" | EVAL x = x + 1 @@ -33,6 +46,16 @@ left:keyword | middle:keyword | right:keyword | x:integer | y:integer left | middle | right | 9 | 10 ; +shadowingInternal +ROW x = 10000 +| EVAL x = x + 1, x = x - 2 +; + +x:integer +9999 +; + + withMath row a = 1 | eval b = 2 + 3; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec index 9d574eed7be6b..d9857e8c122ef 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec @@ -26,6 +26,19 @@ first_name:keyword | left:keyword | full_name:keyword | right:keyword | last_nam Georgi | left | Georgi Facello | right | Facello ; +shadowingSubfields +FROM addresses +| KEEP city.country.continent.planet.name, city.country.name, city.name +| GROK city.name "%{WORD:city.country.continent.planet.name} %{WORD}" +| SORT city.name +; + +city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:keyword +Netherlands | Amsterdam | null +United States of America | San Francisco | San +Japan | Tokyo | null +; + shadowingSelf FROM employees | KEEP first_name, last_name @@ -50,6 +63,18 @@ last_name:keyword | left:keyword | foo:keyword | middle:keyword | ri Facello | left | Georgi1 Georgi2 Facello | middle | right | Georgi1 | Georgi2 | Facello ; +shadowingInternal +FROM addresses +| KEEP city.name, zip_code +| GROK zip_code "%{WORD:zip_parts} %{WORD:zip_parts}" +| SORT city.name +; + +city.name:keyword | zip_code:keyword | zip_parts:keyword +Amsterdam | 1016 ED | ["1016", "ED"] +San Francisco | CA 94108 | ["CA", "94108"] +Tokyo | 100-7014 | null +; complexPattern ROW a = "1953-01-23T12:15:00Z 127.0.0.1 some.email@foo.com 42" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec index 14a3807b8729c..bcce35eb81e0f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec @@ -539,3 +539,63 @@ c:i 1 1 ; + +shadowingInternal +FROM employees +| SORT emp_no ASC +| KEEP last_name, emp_no, last_name +| LIMIT 2 +; + +emp_no:integer | last_name:keyword + 10001 | Facello + 10002 | Simmel +; + +shadowingInternalWildcard +FROM employees +| SORT emp_no ASC +| KEEP last*name, emp_no, last*name, first_name, last*, gender, last* +| LIMIT 2 +; + +emp_no:integer | first_name:keyword | gender:keyword | last_name:keyword + 10001 | Georgi | M | Facello + 10002 | Bezalel | F | Simmel +; + +shadowingInternalWildcardAndExplicit +FROM employees +| SORT emp_no ASC +| KEEP last*name, emp_no, last_name, first_name, last*, languages, last_name, gender, last*name +| LIMIT 2 +; + +emp_no:integer | first_name:keyword | languages:integer | last_name:keyword | gender:keyword + 10001 | Georgi | 2 | Facello | M + 10002 | Bezalel | 5 | Simmel | F +; + +shadowingSubfields +FROM addresses 
+| KEEP city.country.continent.planet.name, city.country.continent.name, city.country.name, city.name, city.country.continent.planet.name +| SORT city.name +; + +city.country.continent.name:keyword | city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:keyword +Europe | Netherlands | Amsterdam | Earth +North America | United States of America | San Francisco | Earth +Asia | Japan | Tokyo | Earth +; + +shadowingSubfieldsWildcard +FROM addresses +| KEEP *name, city.country.continent.planet.name +| SORT city.name +; + +city.country.continent.name:keyword | city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:keyword +Europe | Netherlands | Amsterdam | Earth +North America | United States of America | San Francisco | Earth +Asia | Japan | Tokyo | Earth +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-addresses.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-addresses.json new file mode 100644 index 0000000000000..679efb3c8d38b --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-addresses.json @@ -0,0 +1,44 @@ +{ + "properties" : { + "street" : { + "type": "keyword" + }, + "number" : { + "type": "keyword" + }, + "zip_code": { + "type": "keyword" + }, + "city" : { + "properties": { + "name": { + "type": "keyword" + }, + "country": { + "properties": { + "name": { + "type": "keyword" + }, + "continent": { + "properties": { + "name": { + "type": "keyword" + }, + "planet": { + "properties": { + "name": { + "type": "keyword" + }, + "galaxy": { + "type": "keyword" + } + } + } + } + } + } + } + } + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec index 1e830486cc7c7..ca4c627cae749 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec @@ -174,3 +174,42 @@ avg_worked_seconds:l | birth_date:date | emp_no:i | first_n 341158890 | 1961-10-15T00:00:00.000Z | 10060 | Breannda | M | 1.42 | 1.4199999570846558 | 1.419921875 | 1.42 | 1987-11-02T00:00:00.000Z | [false, false, false, true]| [Business Analyst, Data Scientist, Senior Team Lead] | 2 | 2 | 2 | 2 | Billingsley | 29175 | [-1.76, -0.85] | [-1, 0] | [-0.85, -1.76] | [-1, 0] | true | 29175 246355863 | null | 10042 | Magy | F | 1.44 | 1.440000057220459 | 1.4404296875 | 1.44 | 1993-03-21T00:00:00.000Z | null | [Architect, Business Analyst, Internship, Junior Developer] | 3 | 3 | 3 | 3 | Stamatiou | 30404 | [-9.28, 9.42] | [-9, 9] | [-9.28, 9.42] | [-9, 9] | true | 30404 ; + +shadowing +FROM employees +| SORT emp_no ASC +| KEEP emp_no, first_name, last_name +| RENAME emp_no AS last_name +| LIMIT 2 +; + +last_name:integer | first_name:keyword + 10001 | Georgi + 10002 | Bezalel +; + +shadowingSubfields +FROM addresses +| KEEP city.country.continent.planet.name, city.country.continent.name, city.country.name, city.name +| RENAME city.name AS city.country.continent.planet.name, city.country.name AS city.country.continent.name +| SORT city.country.continent.planet.name +; + +city.country.continent.name:keyword | city.country.continent.planet.name:keyword +Netherlands | Amsterdam +United States of America | San Francisco +Japan | Tokyo +; + +shadowingInternal +FROM employees +| SORT emp_no ASC +| KEEP emp_no, last_name +| RENAME emp_no AS x, last_name AS x +| LIMIT 2 +; + +x:keyword +Facello +Simmel +; diff --git 
a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec index bb1cf7358ca74..da640b6306299 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec @@ -36,6 +36,24 @@ a:integer // end::multivalue-result[] ; +shadowingInternal +required_capability: unique_names +ROW a = 1, a = 2; + +a:integer + 2 +; + +shadowingInternalSubfields +required_capability: unique_names +// Fun fact: "Sissi" is an actual exoplanet name, after the character from the movie with the same name. A.k.a. HAT-P-14 b. +ROW city.country.continent.planet.name = "Earth", city.country.continent.name = "Netherlands", city.country.continent.planet.name = "Sissi" +; + +city.country.continent.name:keyword | city.country.continent.planet.name:keyword +Netherlands | Sissi +; + unsignedLongLiteral ROW long_max = 9223372036854775807, ul_start = 9223372036854775808, ul_end = 18446744073709551615, double=18446744073709551616; @@ -70,10 +88,11 @@ a:integer | b:integer | c:null | z:integer ; evalRowWithNull2 +required_capability: unique_names row a = 1, null, b = 2, c = null, null | eval z = a+b; -a:integer | null:null | b:integer | c:null | null:null | z:integer -1 | null | 2 | null | null | 3 +a:integer | b:integer | c:null | null:null | z:integer + 1 | 2 | null | null | 3 ; evalRowWithNull3 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index e4fc0580e4ba2..9558cf235b847 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -1819,3 +1819,36 @@ warning:Line 3:17: java.lang.ArithmeticException: / by zero w_avg:double null ; + +shadowingInternal +FROM employees +| STATS x = MAX(emp_no), x = MIN(emp_no) +; + +x:integer +10001 +; + +shadowingInternalWithGroup +FROM employees +| STATS x = MAX(emp_no), x = MIN(emp_no) BY x = gender +| SORT x ASC +; + +x:keyword +F +M +null +; + +shadowingTheGroup +FROM employees +| STATS gender = MAX(emp_no), gender = MIN(emp_no) BY gender +| SORT gender ASC +; + +gender:keyword +F +M +null +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index d361a0f9ebd3d..5641f49b039f6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -133,7 +133,13 @@ public enum Cap { * Fix the status code returned when trying to run count_distinct on the _source type (which is not supported). * see count_distinct(_source) returns a 500 response */ - FIX_COUNT_DISTINCT_SOURCE_ERROR; + FIX_COUNT_DISTINCT_SOURCE_ERROR, + + /** + * Fix for non-unique attribute names in ROW and logical plans. 
+ * https://github.com/elastic/elasticsearch/issues/110541 + */ + UNIQUE_NAMES; private final boolean snapshotOnly; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 21203f8dbb3dd..9468647fd10e3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -1068,13 +1068,29 @@ public static Expression castStringLiteral(Expression from, DataType target) { * Any fields which could not be resolved by conversion functions will be converted to UnresolvedAttribute instances in a later rule * (See UnresolveUnionTypes below). */ - private static class ResolveUnionTypes extends BaseAnalyzerRule { + private static class ResolveUnionTypes extends Rule { record TypeResolutionKey(String fieldName, DataType fieldType) {} + private List unionFieldAttributes; + @Override - protected LogicalPlan doRule(LogicalPlan plan) { - List unionFieldAttributes = new ArrayList<>(); + public LogicalPlan apply(LogicalPlan plan) { + unionFieldAttributes = new ArrayList<>(); + // Collect field attributes from previous runs + plan.forEachUp(EsRelation.class, rel -> { + for (Attribute attr : rel.output()) { + if (attr instanceof FieldAttribute fa && fa.field() instanceof MultiTypeEsField) { + unionFieldAttributes.add(fa); + } + } + }); + + return plan.transformUp(LogicalPlan.class, p -> p.resolved() || p.childrenResolved() == false ? p : doRule(p)); + } + + private LogicalPlan doRule(LogicalPlan plan) { + int alreadyAddedUnionFieldAttributes = unionFieldAttributes.size(); // See if the eval function has an unresolved MultiTypeEsField field // Replace the entire convert function with a new FieldAttribute (containing type conversion knowledge) plan = plan.transformExpressionsOnly( @@ -1082,7 +1098,7 @@ protected LogicalPlan doRule(LogicalPlan plan) { convert -> resolveConvertFunction(convert, unionFieldAttributes) ); // If no union fields were generated, return the plan as is - if (unionFieldAttributes.isEmpty()) { + if (unionFieldAttributes.size() == alreadyAddedUnionFieldAttributes) { return plan; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index 4c5d9efb449f7..c02b9948def3f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -8,8 +8,10 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.plan.QueryPlan; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; @@ -36,6 +38,9 @@ import org.elasticsearch.xpack.esql.plan.physical.RowExec; import org.elasticsearch.xpack.esql.plan.physical.ShowExec; +import java.util.HashSet; +import java.util.Set; + import static org.elasticsearch.xpack.esql.core.common.Failure.fail; class OptimizerRules { @@ 
-49,9 +54,24 @@ void checkPlan(P p, Failures failures) { AttributeSet input = p.inputSet(); AttributeSet generated = generates(p); AttributeSet missing = refs.subtract(input).subtract(generated); - if (missing.size() > 0) { + if (missing.isEmpty() == false) { failures.add(fail(p, "Plan [{}] optimized incorrectly due to missing references {}", p.nodeString(), missing)); } + + Set outputAttributeNames = new HashSet<>(); + Set outputAttributeIds = new HashSet<>(); + for (Attribute outputAttr : p.output()) { + if (outputAttributeNames.add(outputAttr.name()) == false || outputAttributeIds.add(outputAttr.id()) == false) { + failures.add( + fail( + p, + "Plan [{}] optimized incorrectly due to duplicate output attribute {}", + p.nodeString(), + outputAttr.toString() + ) + ); + } + } } protected AttributeSet references(P p) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index 84c849a759ae5..526cf7f17440d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -74,6 +74,7 @@ import static org.elasticsearch.xpack.esql.core.parser.ParserUtils.source; import static org.elasticsearch.xpack.esql.core.parser.ParserUtils.typedParsing; import static org.elasticsearch.xpack.esql.core.parser.ParserUtils.visitList; +import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputExpressions; import static org.elasticsearch.xpack.esql.plan.logical.Enrich.Mode; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToInt; @@ -235,8 +236,9 @@ public Map visitCommandOptions(EsqlBaseParser.CommandOptionsCont } @Override + @SuppressWarnings("unchecked") public LogicalPlan visitRowCommand(EsqlBaseParser.RowCommandContext ctx) { - return new Row(source(ctx), visitFields(ctx.fields())); + return new Row(source(ctx), (List) (List) mergeOutputExpressions(visitFields(ctx.fields()), List.of())); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java index 7d99c566aa0c7..2d816ae117816 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; @@ -30,6 +31,12 @@ public List renamings() { return renamings; } + @Override + public List output() { + // Rename is mapped to a Project during analysis; we do not compute the output here. 
+ throw new IllegalStateException("Should never reach here."); + } + @Override public boolean expressionsResolved() { for (var alias : renamings) { From c7ece9b400aa9abb20e47631f8fc4f1cda00a072 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Wed, 17 Jul 2024 17:15:11 +0200 Subject: [PATCH 073/406] ESQL: Correctly compute Rename's output (#110968) (#110976) Calling Rename.output() previously returned wrong results. Since #110488, instead it throws an IllegalStateException. That leads to test failures in the EsqlNodeSubclassTests because e.g. MvExpandExec and FieldExtractExec eagerly calls .output() on its child when it's being constructed, and the child can be a fragment containing a Rename. (cherry picked from commit 7df1b06525d574dcc250736bb506fbb058fd8c9f) # Conflicts: # x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java --- .../xpack/esql/core/rule/Rule.java | 4 +-- .../xpack/esql/analysis/Analyzer.java | 27 ++++++++++++++----- .../xpack/esql/plan/logical/Rename.java | 9 +++++-- 3 files changed, 30 insertions(+), 10 deletions(-) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/rule/Rule.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/rule/Rule.java index 6121c9b36442b..163b1f89f2abb 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/rule/Rule.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/rule/Rule.java @@ -6,8 +6,8 @@ */ package org.elasticsearch.xpack.esql.core.rule; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.xpack.esql.core.tree.Node; import org.elasticsearch.xpack.esql.core.util.ReflectionUtils; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 9468647fd10e3..b41156824be12 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.logging.Logger; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; @@ -383,7 +384,7 @@ private LocalRelation tableMapAsRelation(Source source, Map mapT } } - private static class ResolveRefs extends BaseAnalyzerRule { + public static class ResolveRefs extends BaseAnalyzerRule { @Override protected LogicalPlan doRule(LogicalPlan plan) { if (plan.childrenResolved() == false) { @@ -575,20 +576,28 @@ private LogicalPlan resolveLookup(Lookup l, List childrenOutput) { } private Attribute maybeResolveAttribute(UnresolvedAttribute ua, List childrenOutput) { + return maybeResolveAttribute(ua, childrenOutput, log); + } + + private static Attribute maybeResolveAttribute(UnresolvedAttribute ua, List childrenOutput, Logger logger) { if (ua.customMessage()) { return ua; } - return resolveAttribute(ua, childrenOutput); + return resolveAttribute(ua, childrenOutput, logger); } private Attribute resolveAttribute(UnresolvedAttribute ua, List childrenOutput) { + 
return resolveAttribute(ua, childrenOutput, log); + } + + private static Attribute resolveAttribute(UnresolvedAttribute ua, List childrenOutput, Logger logger) { Attribute resolved = ua; var named = resolveAgainstList(ua, childrenOutput); // if resolved, return it; otherwise keep it in place to be resolved later if (named.size() == 1) { resolved = named.get(0); - if (log.isTraceEnabled() && resolved.resolved()) { - log.trace("Resolved {} to {}", ua, resolved); + if (logger != null && logger.isTraceEnabled() && resolved.resolved()) { + logger.trace("Resolved {} to {}", ua, resolved); } } else { if (named.size() > 0) { @@ -724,6 +733,12 @@ private LogicalPlan resolveDrop(Drop drop, List childOutput) { } private LogicalPlan resolveRename(Rename rename, List childrenOutput) { + List projections = projectionsForRename(rename, childrenOutput, log); + + return new EsqlProject(rename.source(), rename.child(), projections); + } + + public static List projectionsForRename(Rename rename, List childrenOutput, Logger logger) { List projections = new ArrayList<>(childrenOutput); int renamingsCount = rename.renamings().size(); @@ -736,7 +751,7 @@ private LogicalPlan resolveRename(Rename rename, List childrenOutput) // remove attributes overwritten by a renaming: `| keep a, b, c | rename a as b` projections.removeIf(x -> x.name().equals(alias.name())); - var resolved = maybeResolveAttribute(ua, childrenOutput); + var resolved = maybeResolveAttribute(ua, childrenOutput, logger); if (resolved instanceof UnsupportedAttribute || resolved.resolved()) { var realiased = (NamedExpression) alias.replaceChildren(List.of(resolved)); projections.replaceAll(x -> x.equals(resolved) ? realiased : x); @@ -779,7 +794,7 @@ private LogicalPlan resolveRename(Rename rename, List childrenOutput) // add unresolved renamings to later trip the Verifier. projections.addAll(unresolved); - return new EsqlProject(rename.source(), rename.child(), projections); + return projections; } private LogicalPlan resolveEnrich(Enrich enrich, List childrenOutput) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java index 2d816ae117816..9a296dc58eb43 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java @@ -7,8 +7,11 @@ package org.elasticsearch.xpack.esql.plan.logical; +import org.elasticsearch.xpack.esql.analysis.Analyzer.ResolveRefs; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; @@ -33,8 +36,10 @@ public List renamings() { @Override public List output() { - // Rename is mapped to a Project during analysis; we do not compute the output here. - throw new IllegalStateException("Should never reach here."); + // Normally shouldn't reach here, as Rename only exists before resolution. 
+ List projectionsAfterResolution = ResolveRefs.projectionsForRename(this, this.child().output(), null); + + return Expressions.asAttributes(projectionsAfterResolution); } @Override From ecbba721828a9a2f6d8c610177a68e9be3dc5b21 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 17 Jul 2024 20:17:11 +0200 Subject: [PATCH 074/406] Adding minimal docs around using index mode logs. (#110932) (#110988) This adds minimal docs around how to use the new logs index mode for data streams (most common use case). This is minimal because logs index mode is still in tech preview. Minimal docs should allow any interested users to experiment with the new logs index mode. --- .../data-streams/data-streams.asciidoc | 1 + docs/reference/data-streams/logs.asciidoc | 52 +++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 docs/reference/data-streams/logs.asciidoc diff --git a/docs/reference/data-streams/data-streams.asciidoc b/docs/reference/data-streams/data-streams.asciidoc index 9c7137563caef..1484e21febdb3 100644 --- a/docs/reference/data-streams/data-streams.asciidoc +++ b/docs/reference/data-streams/data-streams.asciidoc @@ -157,4 +157,5 @@ include::set-up-a-data-stream.asciidoc[] include::use-a-data-stream.asciidoc[] include::change-mappings-and-settings.asciidoc[] include::tsds.asciidoc[] +include::logs.asciidoc[] include::lifecycle/index.asciidoc[] diff --git a/docs/reference/data-streams/logs.asciidoc b/docs/reference/data-streams/logs.asciidoc new file mode 100644 index 0000000000000..a2d8b6776e052 --- /dev/null +++ b/docs/reference/data-streams/logs.asciidoc @@ -0,0 +1,52 @@ +[[logs-data-stream]] +== Logs data stream + +preview::[Logs data streams and the logs index mode are in tech preview and may be changed or removed in the future. Don't use logs data streams or logs index mode in production.] + +A logs data stream is a data stream type that stores log data more efficiently. + +In benchmarks, log data stored in a logs data stream used ~2.5 times less disk space than a regular data +stream. The exact impact will vary depending on your data set. + +The following features are enabled in a logs data stream: + +* <>, which omits storing the `_source` field. When the document source is requested, it is synthesized from document fields upon retrieval. + +* Index sorting. This yields a lower storage footprint. By default indices are sorted by `host.name` and `@timestamp` fields at index time. + +* More space-efficient compression for fields with <> enabled. + +[discrete] +[[how-to-use-logsds]] +=== Create a logs data stream + +To create a logs data stream, set your index template `index.mode` to `logs`: + +[source,console] +---- +PUT _index_template/my-index-template +{ + "index_patterns": ["logs-*"], + "data_stream": { }, + "template": { + "settings": { + "index.mode": "logs" <1> + } + }, + "priority": 101 <2> +} +---- +// TEST + +<1> The index mode setting. +<2> The index template priority. By default, Elasticsearch ships with an index template with a `logs-*-*` pattern with a priority of 100. You need to define a priority higher than 100 to ensure that this index template gets selected over the default index template for the `logs-*-*` pattern. See the <> for more information. + +After the index template is created, new indices that use the template will be configured as a logs data stream. You can start indexing data and <>.
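+
+For example, with the template above in place, a log document can be added to a matching data stream as in the following sketch; the data stream name `logs-myapp-default` is only an illustration of a name that matches the `logs-*` pattern:
+
+[source,console]
+----
+POST logs-myapp-default/_doc
+{
+  "@timestamp": "2024-07-01T12:00:00Z",
+  "host.name": "my-host",
+  "message": "Connection accepted"
+}
+----
+// TEST[skip:data stream name is illustrative]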
+ +//// +[source,console] +---- +DELETE _index_template/my-index-template +---- +// TEST[continued] +//// From 41356cde936866a20a1a91c11d6b67fe9cf09059 Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Wed, 17 Jul 2024 17:25:05 -0400 Subject: [PATCH 075/406] Fix references to incorrect query rule criteria type (#110994) (#111000) --- docs/reference/query-rules/apis/put-query-rule.asciidoc | 4 ++-- docs/reference/query-rules/apis/put-query-ruleset.asciidoc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/reference/query-rules/apis/put-query-rule.asciidoc b/docs/reference/query-rules/apis/put-query-rule.asciidoc index 2b9a6ba892b84..9737673be009c 100644 --- a/docs/reference/query-rules/apis/put-query-rule.asciidoc +++ b/docs/reference/query-rules/apis/put-query-rule.asciidoc @@ -70,10 +70,10 @@ Matches all queries, regardless of input. -- - `metadata` (Optional, string) The metadata field to match against. This metadata will be used to match against `match_criteria` sent in the <>. -Required for all criteria types except `global`. +Required for all criteria types except `always`. - `values` (Optional, array of strings) The values to match against the metadata field. Only one value must match for the criteria to be met. -Required for all criteria types except `global`. +Required for all criteria types except `always`. `actions`:: (Required, object) The actions to take when the rule is matched. diff --git a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc index 012060e1004ae..c164e9e140a4e 100644 --- a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc @@ -78,10 +78,10 @@ Matches all queries, regardless of input. -- - `metadata` (Optional, string) The metadata field to match against. This metadata will be used to match against `match_criteria` sent in the <>. -Required for all criteria types except `global`. +Required for all criteria types except `always`. - `values` (Optional, array of strings) The values to match against the metadata field. Only one value must match for the criteria to be met. -Required for all criteria types except `global`. +Required for all criteria types except `always`. Actions depend on the rule type. For `pinned` rules, actions follow the format specified by the <>. 
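For reference, a minimal rule using the `always` criteria type described in the patch above (and therefore omitting `metadata` and `values`) could look like the following sketch; the ruleset ID, rule ID, and pinned document IDs are placeholders:

[source,console]
----
PUT _query_rules/my-ruleset/_rule/my-rule
{
  "type": "pinned",
  "criteria": [
    { "type": "always" }
  ],
  "actions": {
    "ids": [ "id1", "id2" ]
  }
}
----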
From 5b9639a2f6f44c1e24d29446c4826d30e4a60764 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 18 Jul 2024 09:04:30 +0100 Subject: [PATCH 076/406] [DOCS] Retrievers and rerankers (#110007) (#111008) Co-authored-by: Adam Demjen --- docs/reference/search/retriever.asciidoc | 67 ++++++++ .../retrievers-reranking/index.asciidoc | 8 + .../retrievers-overview.asciidoc | 71 ++++---- .../semantic-reranking.asciidoc | 151 ++++++++++++++++++ .../search-your-data.asciidoc | 2 +- 5 files changed, 261 insertions(+), 38 deletions(-) create mode 100644 docs/reference/search/search-your-data/retrievers-reranking/index.asciidoc rename docs/reference/search/search-your-data/{ => retrievers-reranking}/retrievers-overview.asciidoc (75%) create mode 100644 docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index 590df272cc89e..ed39ac786880b 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -28,6 +28,9 @@ A <> that replaces the functionality of a <> that produces top documents from <>. +`text_similarity_reranker`:: +A <> that enhances search results by re-ranking documents based on semantic similarity to a specified inference text, using a machine learning model. + [[standard-retriever]] ==== Standard Retriever @@ -201,6 +204,70 @@ GET /index/_search ---- // NOTCONSOLE +[[text-similarity-reranker-retriever]] +==== Text Similarity Re-ranker Retriever + +The `text_similarity_reranker` is a type of retriever that enhances search results by re-ranking documents based on semantic similarity to a specified inference text, using a machine learning model. + +===== Prerequisites + +To use `text_similarity_reranker` you must first set up a `rerank` task using the <>. +The `rerank` task should be set up with a machine learning model that can compute text similarity. +Currently you can integrate directly with the Cohere Rerank endpoint using the <> task, or upload a model to {es} <>. + +===== Parameters + +`field`:: +(Required, `string`) ++ +The document field to be used for text similarity comparisons. This field should contain the text that will be evaluated against the `inferenceText`. + +`inference_id`:: +(Required, `string`) ++ +Unique identifier of the inference endpoint created using the {infer} API. + +`inference_text`:: +(Required, `string`) ++ +The text snippet used as the basis for similarity comparison. + +`rank_window_size`:: +(Optional, `int`) ++ +The number of top documents to consider in the re-ranking process. Defaults to `10`. + +`min_score`:: +(Optional, `float`) ++ +Sets a minimum threshold score for including documents in the re-ranked results. Documents with similarity scores below this threshold will be excluded. Note that score calculations vary depending on the model used. + +===== Restrictions + +A text similarity re-ranker retriever is a compound retriever. Child retrievers may not use elements that are restricted by having a compound retriever as part of the retriever tree. + +===== Example + +[source,js] +---- +GET /index/_search +{ + "retriever": { + "text_similarity_reranker": { + "retriever": { + "standard": { ... 
} + } + }, + "field": "text", + "inference_id": "my-cohere-rerank-model", + "inference_text": "Most famous landmark in Paris", + "rank_window_size": 100, + "min_score": 0.5 + } +} +---- +// NOTCONSOLE + ==== Using `from` and `size` with a retriever tree The <> and <> diff --git a/docs/reference/search/search-your-data/retrievers-reranking/index.asciidoc b/docs/reference/search/search-your-data/retrievers-reranking/index.asciidoc new file mode 100644 index 0000000000000..87ed52e365370 --- /dev/null +++ b/docs/reference/search/search-your-data/retrievers-reranking/index.asciidoc @@ -0,0 +1,8 @@ +[[retrievers-reranking-overview]] +== Retrievers and reranking + +* <> +* <> + +include::retrievers-overview.asciidoc[] +include::semantic-reranking.asciidoc[] diff --git a/docs/reference/search/search-your-data/retrievers-overview.asciidoc b/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc similarity index 75% rename from docs/reference/search/search-your-data/retrievers-overview.asciidoc rename to docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc index 92cd085583916..99659ae76e092 100644 --- a/docs/reference/search/search-your-data/retrievers-overview.asciidoc +++ b/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc @@ -1,7 +1,5 @@ [[retrievers-overview]] -== Retrievers - -// Will move to a top level "Retrievers and reranking" section once reranking is live +=== Retrievers preview::[] @@ -15,33 +13,32 @@ For implementation details, including notable restrictions, check out the [discrete] [[retrievers-overview-types]] -=== Retriever types +==== Retriever types Retrievers come in various types, each tailored for different search operations. The following retrievers are currently available: -* <>. -Returns top documents from a traditional https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl.html[query]. -Mimics a traditional query but in the context of a retriever framework. -This ensures backward compatibility as existing `_search` requests remain supported. -That way you can transition to the new abstraction at your own pace without mixing syntaxes. -* <>. -Returns top documents from a <>, in the context of a retriever framework. -* <>. -Combines and ranks multiple first-stage retrievers using the reciprocal rank fusion (RRF) algorithm. -Allows you to combine multiple result sets with different relevance indicators into a single result set. -An RRF retriever is a *compound retriever*, where its `filter` element is propagated to its sub retrievers. +* <>. Returns top documents from a +traditional https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl.html[query]. +Mimics a traditional query but in the context of a retriever framework. This +ensures backward compatibility as existing `_search` requests remain supported. +That way you can transition to the new abstraction at your own pace without +mixing syntaxes. +* <>. Returns top documents from a <>, +in the context of a retriever framework. +* <>. Combines and ranks multiple first-stage retrievers using +the reciprocal rank fusion (RRF) algorithm. Allows you to combine multiple result sets +with different relevance indicators into a single result set. +An RRF retriever is a *compound retriever*, where its `filter` element is +propagated to its sub retrievers. + Sub retrievers may not use elements that are restricted by having a compound retriever as part of the retriever tree. 
See the <> for detailed examples and information on how to use the RRF retriever. - -[NOTE] -==== -Stay tuned for more retriever types in future releases! -==== +* <>. Used for <>. +Requires first creating a `rerank` task using the <>. [discrete] -=== What makes retrievers useful? +==== What makes retrievers useful? Here's an overview of what makes retrievers useful and how they differ from regular queries. @@ -73,7 +70,7 @@ When using compound retrievers, only the query element is allowed, which enforce [discrete] [[retrievers-overview-example]] -=== Example +==== Example The following example demonstrates how using retrievers simplify the composability of queries for RRF ranking. @@ -154,25 +151,23 @@ GET example-index/_search [discrete] [[retrievers-overview-glossary]] -=== Glossary +==== Glossary Here are some important terms: -* *Retrieval Pipeline*. -Defines the entire retrieval and ranking logic to produce top hits. -* *Retriever Tree*. -A hierarchical structure that defines how retrievers interact. -* *First-stage Retriever*. -Returns an initial set of candidate documents. -* *Compound Retriever*. -Builds on one or more retrievers, enhancing document retrieval and ranking logic. -* *Combiners*. -Compound retrievers that merge top hits from multiple sub-retrievers. -//* NOT YET *Rerankers*. Special compound retrievers that reorder hits and may adjust the number of hits, with distinctions between first-stage and second-stage rerankers. +* *Retrieval Pipeline*. Defines the entire retrieval and ranking logic to +produce top hits. +* *Retriever Tree*. A hierarchical structure that defines how retrievers interact. +* *First-stage Retriever*. Returns an initial set of candidate documents. +* *Compound Retriever*. Builds on one or more retrievers, +enhancing document retrieval and ranking logic. +* *Combiners*. Compound retrievers that merge top hits +from multiple sub-retrievers. +* *Rerankers*. Special compound retrievers that reorder hits and may adjust the number of hits, with distinctions between first-stage and second-stage rerankers. [discrete] [[retrievers-overview-play-in-search]] -=== Retrievers in action +==== Retrievers in action The Search Playground builds Elasticsearch queries using the retriever abstraction. It automatically detects the fields and types in your index and builds a retriever tree based on your selections. @@ -180,7 +175,9 @@ It automatically detects the fields and types in your index and builds a retriev You can use the Playground to experiment with different retriever configurations and see how they affect search results. Refer to the {kibana-ref}/playground.html[Playground documentation] for more information. -// Content coming in https://github.com/elastic/kibana/pull/182692 - +[discrete] +[[retrievers-overview-api-reference]] +==== API reference +For implementation details, including notable restrictions, check out the <> in the Search API docs. \ No newline at end of file diff --git a/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc b/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc new file mode 100644 index 0000000000000..75c06aa953302 --- /dev/null +++ b/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc @@ -0,0 +1,151 @@ +[[semantic-reranking]] +=== Semantic reranking + +preview::[] + +[TIP] +==== +This overview focuses more on the high-level concepts and use cases for semantic reranking. 
For full implementation details on how to set up and use semantic reranking in {es}, see the <> in the Search API docs. +==== + +Rerankers improve the relevance of results from earlier-stage retrieval mechanisms. +_Semantic_ rerankers use machine learning models to reorder search results based on their semantic similarity to a query. + +First-stage retrievers and rankers must be very fast and efficient because they process either the entire corpus, or all matching documents. +In a multi-stage pipeline, you can progressively use more computationally intensive ranking functions and techniques, as they will operate on smaller result sets at each step. +This helps avoid query latency degradation and keeps costs manageable. + +Semantic reranking requires relatively large and complex machine learning models and operates in real-time in response to queries. +This technique makes sense on a small _top-k_ result set, as one of the final steps in a pipeline. +This is a powerful technique for improving search relevance that works equally well with keyword, semantic, or hybrid retrieval algorithms. + +The next sections provide more details on the benefits, use cases, and model types used for semantic reranking. +The final sections include a practical, high-level overview of how to implement <> and links to the full reference documentation. + +[discrete] +[[semantic-reranking-use-cases]] +==== Use cases + +Semantic reranking enables a variety of use cases: + +* *Lexical (BM25) retrieval results reranking* +** Out-of-the-box semantic search by adding a simple API call to any lexical/BM25 retrieval pipeline. +** Adds semantic search capabilities on top of existing indices without reindexing, perfect for quick improvements. +** Ideal for environments with complex existing indices. + +* *Semantic retrieval results reranking* +** Improves results from semantic retrievers using ELSER sparse vector embeddings or dense vector embeddings by using more powerful models. +** Adds a refinement layer on top of hybrid retrieval with <>. + +* *General applications* +** Supports automatic and transparent chunking, eliminating the need for pre-chunking at index time. +** Provides explicit control over document relevance in retrieval-augmented generation (RAG) use cases or other scenarios involving language model (LLM) inputs. + +Now that we've outlined the value of semantic reranking, we'll explore the specific models that power this process and how they differ. + +[discrete] +[[semantic-reranking-models]] +==== Cross-encoder and bi-encoder models + +At a high level, two model types are used for semantic reranking: cross-encoders and bi-encoders. + +NOTE: In this version, {es} *only supports cross-encoders* for semantic reranking. + +* A *cross-encoder model* can be thought of as a more powerful, all-in-one solution, because it generates query-aware document representations. +It takes the query and document texts as a single, concatenated input. +* A *bi-encoder model* takes as input either document or query text. +Documents and query embeddings are computed separately, so they aren't aware of each other. +** To compute a ranking score, an external operation is required. This typically involves computing dot-product or cosine similarity between the query and document embeddings. + +In brief, cross-encoders provide high accuracy but are more resource-intensive. +Bi-encoders are faster and more cost-effective but less precise. + +In future versions, {es} will also support bi-encoders.
+If you're interested in a more detailed analysis of the practical differences between cross-encoders and bi-encoders, untoggle the next section. + +.Comparisons between cross-encoder and bi-encoder +[%collapsible] +============== +The following is a non-exhaustive list of considerations when choosing between cross-encoders and bi-encoders for semantic reranking: + +* Because a cross-encoder model simultaneously processes both query and document texts, it can better infer their relevance, making it more effective as a reranker than a bi-encoder. +* Cross-encoder models are generally larger and more computationally intensive, resulting in higher latencies and increased computational costs. +* There are significantly fewer open-source cross-encoders, while bi-encoders offer a wide variety of sizes, languages, and other trade-offs. +* The effectiveness of cross-encoders can also improve the relevance of semantic retrievers. +For example, their ability to take word order into account can improve on dense or sparse embedding retrieval. +* When trained in tandem with specific retrievers (like lexical/BM25), cross-encoders can “correct” typical errors made by those retrievers. +* Cross-encoders output scores that are consistent across queries. +This enables you to maintain high relevance in result sets, by setting a minimum score threshold for all queries. +For example, this is important when using results in a RAG workflow or if you're otherwise feeding results to LLMs. +Note that similarity scores from bi-encoders/embedding similarities are _query-dependent_, meaning you cannot set universal cut-offs. +* Bi-encoders rerank using embeddings. You can improve your reranking latency by creating embeddings at ingest-time. These embeddings can be stored for reranking without being indexed for retrieval, reducing your memory footprint. +============== + +[discrete] +[[semantic-reranking-in-es]] +==== Semantic reranking in {es} + +In {es}, semantic rerankers are implemented using the {es} <> and a <>. + +To use semantic reranking in {es}, you need to: + +. Choose a reranking model. In addition to cross-encoder models running on {es} inference nodes, we also expose external models and services via the Inference API to semantic rerankers. +** This includes cross-encoder models running in https://huggingface.co/inference-endpoints[HuggingFace Inference Endpoints] and the https://cohere.com/rerank[Cohere Rerank API]. +. Create a `rerank` task using the <>. +The Inference API creates an inference endpoint and configures your chosen machine learning model to perform the reranking task. +. Define a `text_similarity_reranker` retriever in your search request. +The retriever syntax makes it simple to configure both the retrieval and reranking of search results in a single API call. + +.*Example search request* with semantic reranker +[%collapsible] +============== +The following example shows a search request that uses a semantic reranker to reorder the top-k documents based on their semantic similarity to the query. +[source,console] +---- +POST _search +{ + "retriever": { + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "match": { + "text": "How often does the moon hide the sun?" 
+ } + } + } + }, + "field": "text", + "inference_id": "my-cohere-rerank-model", + "inference_text": "How often does the moon hide the sun?", + "rank_window_size": 100, + "min_score": 0.5 + } + } +} +---- +// TEST[skip:TBD] +============== + +[discrete] +[[semantic-reranking-types]] +==== Supported reranking types + +The following `text_similarity_reranker` model configuration options are available. + +*Text similarity with cross-encoder* + +This solution uses a hosted or 3rd party inference service which relies on a cross-encoder model. +The model receives the text fields from the _top-K_ documents, as well as the search query, and calculates scores directly, which are then used to rerank the documents. + +Used with the Cohere inference service rolled out in 8.13, turn on semantic reranking that works out of the box. +Check out our https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations/cohere/cohere-elasticsearch.ipynb[Python notebook] for using Cohere with {es}. + +[discrete] +[[semantic-reranking-learn-more]] +==== Learn more + +* Read the <> for syntax and implementation details +* Learn more about the <> abstraction +* Learn more about the Elastic <> +* Check out our https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations/cohere/cohere-elasticsearch.ipynb[Python notebook] for using Cohere with {es} \ No newline at end of file diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc index e1c1618410f2f..a885df2f2179e 100644 --- a/docs/reference/search/search-your-data/search-your-data.asciidoc +++ b/docs/reference/search/search-your-data/search-your-data.asciidoc @@ -45,7 +45,7 @@ results directly in the Kibana Search UI. include::search-api.asciidoc[] include::knn-search.asciidoc[] include::semantic-search.asciidoc[] -include::retrievers-overview.asciidoc[] +include::retrievers-reranking/index.asciidoc[] include::learning-to-rank.asciidoc[] include::search-across-clusters.asciidoc[] include::search-with-synonyms.asciidoc[] From 5b22c12fc78f6665f12b72d42274605d9fd45dd3 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Thu, 18 Jul 2024 10:53:32 +0200 Subject: [PATCH 077/406] Clarify synonyms docs (#110822) (#111013) --- .../synonym-graph-tokenfilter.asciidoc | 135 +++++++++++------ .../tokenfilters/synonym-tokenfilter.asciidoc | 139 ++++++++++++------ .../tokenfilters/synonyms-format.asciidoc | 2 +- .../search-with-synonyms.asciidoc | 13 ++ .../synonyms/apis/synonyms-apis.asciidoc | 17 +++ 5 files changed, 220 insertions(+), 86 deletions(-) diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc index 3efb8f6de9b3e..e37118019a55c 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -85,45 +85,45 @@ Additional settings are: <> search analyzers to pick up changes to synonym files. Only to be used for search analyzers. * `expand` (defaults to `true`). -* `lenient` (defaults to `false`). If `true` ignores exceptions while parsing the synonym configuration. It is important -to note that only those synonym rules which cannot get parsed are ignored. 
For instance consider the following request: - -[source,console] --------------------------------------------------- -PUT /test_index -{ - "settings": { - "index": { - "analysis": { - "analyzer": { - "synonym": { - "tokenizer": "standard", - "filter": [ "my_stop", "synonym_graph" ] - } - }, - "filter": { - "my_stop": { - "type": "stop", - "stopwords": [ "bar" ] - }, - "synonym_graph": { - "type": "synonym_graph", - "lenient": true, - "synonyms": [ "foo, bar => baz" ] - } - } - } - } - } -} --------------------------------------------------- +Expands definitions for equivalent synonym rules. +See <>. +* `lenient` (defaults to `false`). +If `true` ignores errors while parsing the synonym configuration. +It is important to note that only those synonym rules which cannot get parsed are ignored. +See <> for an example of `lenient` behaviour for invalid synonym rules. + +[discrete] +[[synonym-graph-tokenizer-expand-equivalent-synonyms]] +===== `expand` equivalent synonym rules + +The `expand` parameter controls whether to expand equivalent synonym rules. +Consider a synonym defined like: + +`foo, bar, baz` + +Using `expand: true`, the synonym rule would be expanded into: -With the above request the word `bar` gets skipped but a mapping `foo => baz` is still added. However, if the mapping -being added was `foo, baz => bar` nothing would get added to the synonym list. This is because the target word for the -mapping is itself eliminated because it was a stop word. Similarly, if the mapping was "bar, foo, baz" and `expand` was -set to `false` no mapping would get added as when `expand=false` the target mapping is the first word. However, if -`expand=true` then the mappings added would be equivalent to `foo, baz => foo, baz` i.e, all mappings other than the -stop word. +``` +foo => foo +foo => bar +foo => baz +bar => foo +bar => bar +bar => baz +baz => foo +baz => bar +baz => baz +``` + +When `expand` is set to `false`, the synonym rule is not expanded and the first synonym is treated as the canonical representation. The synonym would be equivalent to: + +``` +foo => foo +bar => foo +baz => foo +``` + +The `expand` parameter does not affect explicit synonym rules, like `foo, bar => baz`. [discrete] [[synonym-graph-tokenizer-ignore_case-deprecated]] @@ -160,12 +160,65 @@ Text will be processed first through filters preceding the synonym filter before {es} will also use the token filters preceding the synonym filter in a tokenizer chain to parse the entries in a synonym file or synonym set. In the above example, the synonyms graph token filter is placed after a stemmer. The stemmer will also be applied to the synonym entries. -The synonym rules should not contain words that are removed by a filter that appears later in the chain (like a `stop` filter). -Removing a term from a synonym rule means there will be no matching for it at query time. - Because entries in the synonym map cannot have stacked positions, some token filters may cause issues here. Token filters that produce multiple versions of a token may choose which version of the token to emit when parsing synonyms. For example, `asciifolding` will only produce the folded version of the token. Others, like `multiplexer`, `word_delimiter_graph` or `ngram` will throw an error. If you need to build analyzers that include both multi-token filters and synonym filters, consider using the <> filter, with the multi-token filters in one branch and the synonym filter in the other. 
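To see how a given filter chain and `expand` setting affect a synonym rule before relying on it at search time, you can run sample text through the analyzer with the analyze API. The following is a minimal sketch; the index, analyzer, and filter names are illustrative. With `expand: false` and the rule `foo, bar, baz`, analyzing `bar` should emit the canonical token `foo`, matching the contracted mappings described above.

[source,console]
----
// Index, analyzer, and filter names are illustrative placeholders.
PUT /my-synonym-test
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "standard",
          "filter": [ "lowercase", "my_synonyms" ]
        }
      },
      "filter": {
        "my_synonyms": {
          "type": "synonym_graph",
          "expand": false,
          "synonyms": [ "foo, bar, baz" ]
        }
      }
    }
  }
}

// With expand set to false, "bar" is rewritten to the first synonym, "foo".
GET /my-synonym-test/_analyze
{
  "analyzer": "my_analyzer",
  "text": "bar"
}
----
// TEST[skip:TBD]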
+
+[discrete]
+[[synonym-graph-tokenizer-stop-token-filter]]
+===== Synonyms and `stop` token filters
+
+Synonyms and <> interact with each other in the following ways:
+
+[discrete]
+====== Stop token filter *before* synonym token filter
+
+Stop words will be removed from the synonym rule definition.
+This can cause errors on the synonym rule.
+
+[WARNING]
+====
+Invalid synonym rules can cause errors when applying analyzer changes.
+For reloadable analyzers, this prevents reloading and applying changes.
+You must correct errors in the synonym rules and reload the analyzer.
+
+An index with invalid synonym rules cannot be reopened, making it inoperable when:
+
+* A node containing the index starts
+* The index is opened from a closed state
+* A node restart occurs (which reopens the node assigned shards)
+====
+
+For *explicit synonym rules* like `foo, bar => baz` with a stop filter that removes `bar`:
+
+- If `lenient` is set to `false`, an error will be raised as `bar` would be removed from the left hand side of the synonym rule.
+- If `lenient` is set to `true`, the rule `foo => baz` will be added and `bar => baz` will be ignored.
+
+If the stop filter removed `baz` instead:
+
+- If `lenient` is set to `false`, an error will be raised as `baz` would be removed from the right hand side of the synonym rule.
+- If `lenient` is set to `true`, the synonym will have no effect as the target word is removed.
+
+For *equivalent synonym rules* like `foo, bar, baz` and `expand: true`, with a stop filter that removes `bar`:
+
+- If `lenient` is set to `false`, an error will be raised as `bar` would be removed from the synonym rule.
+- If `lenient` is set to `true`, the synonyms added would be equivalent to the following synonym rules, which do not contain the removed word:
+
+```
+foo => foo
+foo => baz
+baz => foo
+baz => baz
+```
+
+[discrete]
+====== Stop token filter *after* synonym token filter
+
+The stop filter will remove the terms from the resulting synonym expansion.
+
+For example, a synonym rule like `foo, bar => baz` and a stop filter that removes `baz` will get no matches for `foo` or `bar`, as both would get expanded to `baz` which is removed by the stop filter.
+
+If the stop filter removed `foo` instead, then searching for `foo` would get expanded to `baz`, which is not removed by the stop filter thus potentially providing matches for `baz`.
diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
index 046cd297b5092..1658f016db60b 100644
--- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
@@ -73,47 +73,45 @@ Additional settings are: <> search analyzers to pick up changes to synonym files. Only to be used for search analyzers. * `expand` (defaults to `true`).
-* `lenient` (defaults to `false`). If `true` ignores exceptions while parsing the synonym configuration. It is important
-to note that only those synonym rules which cannot get parsed are ignored.
For instance consider the following request: - - -[source,console] --------------------------------------------------- -PUT /test_index -{ - "settings": { - "index": { - "analysis": { - "analyzer": { - "synonym": { - "tokenizer": "standard", - "filter": [ "my_stop", "synonym" ] - } - }, - "filter": { - "my_stop": { - "type": "stop", - "stopwords": [ "bar" ] - }, - "synonym": { - "type": "synonym", - "lenient": true, - "synonyms": [ "foo, bar => baz" ] - } - } - } - } - } -} --------------------------------------------------- +Expands definitions for equivalent synonym rules. +See <>. +* `lenient` (defaults to `false`). +If `true` ignores errors while parsing the synonym configuration. +It is important to note that only those synonym rules which cannot get parsed are ignored. +See <> for an example of `lenient` behaviour for invalid synonym rules. + +[discrete] +[[synonym-tokenizer-expand-equivalent-synonyms]] +===== `expand` equivalent synonym rules + +The `expand` parameter controls whether to expand equivalent synonym rules. +Consider a synonym defined like: + +`foo, bar, baz` + +Using `expand: true`, the synonym rule would be expanded into: -With the above request the word `bar` gets skipped but a mapping `foo => baz` is still added. However, if the mapping -being added was `foo, baz => bar` nothing would get added to the synonym list. This is because the target word for the -mapping is itself eliminated because it was a stop word. Similarly, if the mapping was "bar, foo, baz" and `expand` was -set to `false` no mapping would get added as when `expand=false` the target mapping is the first word. However, if -`expand=true` then the mappings added would be equivalent to `foo, baz => foo, baz` i.e, all mappings other than the -stop word. +``` +foo => foo +foo => bar +foo => baz +bar => foo +bar => bar +bar => baz +baz => foo +baz => bar +baz => baz +``` +When `expand` is set to `false`, the synonym rule is not expanded and the first synonym is treated as the canonical representation. The synonym would be equivalent to: + +``` +foo => foo +bar => foo +baz => foo +``` + +The `expand` parameter does not affect explicit synonym rules, like `foo, bar => baz`. [discrete] [[synonym-tokenizer-ignore_case-deprecated]] @@ -135,7 +133,7 @@ To apply synonyms, you will need to include a synonym token filters into an anal "my_analyzer": { "type": "custom", "tokenizer": "standard", - "filter": ["stemmer", "synonym_graph"] + "filter": ["stemmer", "synonym"] } } ---- @@ -148,10 +146,7 @@ Order is important for your token filters. Text will be processed first through filters preceding the synonym filter before being processed by the synonym filter. {es} will also use the token filters preceding the synonym filter in a tokenizer chain to parse the entries in a synonym file or synonym set. -In the above example, the synonyms graph token filter is placed after a stemmer. The stemmer will also be applied to the synonym entries. - -The synonym rules should not contain words that are removed by a filter that appears later in the chain (like a `stop` filter). -Removing a term from a synonym rule means there will be no matching for it at query time. +In the above example, the synonyms token filter is placed after a stemmer. The stemmer will also be applied to the synonym entries. Because entries in the synonym map cannot have stacked positions, some token filters may cause issues here. Token filters that produce multiple versions of a token may choose which version of the token to emit when parsing synonyms. 
@@ -159,3 +154,59 @@ For example, `asciifolding` will only produce the folded version of the token. Others, like `multiplexer`, `word_delimiter_graph` or `ngram` will throw an error. If you need to build analyzers that include both multi-token filters and synonym filters, consider using the <> filter, with the multi-token filters in one branch and the synonym filter in the other.
+
+[discrete]
+[[synonym-tokenizer-stop-token-filter]]
+===== Synonyms and `stop` token filters
+
+Synonyms and <> interact with each other in the following ways:
+
+[discrete]
+====== Stop token filter *before* synonym token filter
+
+Stop words will be removed from the synonym rule definition.
+This can cause errors on the synonym rule.
+
+[WARNING]
+====
+Invalid synonym rules can cause errors when applying analyzer changes.
+For reloadable analyzers, this prevents reloading and applying changes.
+You must correct errors in the synonym rules and reload the analyzer.
+
+An index with invalid synonym rules cannot be reopened, making it inoperable when:
+
+* A node containing the index starts
+* The index is opened from a closed state
+* A node restart occurs (which reopens the node assigned shards)
+====
+
+For *explicit synonym rules* like `foo, bar => baz` with a stop filter that removes `bar`:
+
+- If `lenient` is set to `false`, an error will be raised as `bar` would be removed from the left hand side of the synonym rule.
+- If `lenient` is set to `true`, the rule `foo => baz` will be added and `bar => baz` will be ignored.
+
+If the stop filter removed `baz` instead:
+
+- If `lenient` is set to `false`, an error will be raised as `baz` would be removed from the right hand side of the synonym rule.
+- If `lenient` is set to `true`, the synonym will have no effect as the target word is removed.
+
+For *equivalent synonym rules* like `foo, bar, baz` and `expand: true`, with a stop filter that removes `bar`:
+
+- If `lenient` is set to `false`, an error will be raised as `bar` would be removed from the synonym rule.
+- If `lenient` is set to `true`, the synonyms added would be equivalent to the following synonym rules, which do not contain the removed word:
+
+```
+foo => foo
+foo => baz
+baz => foo
+baz => baz
+```
+
+[discrete]
+====== Stop token filter *after* synonym token filter
+
+The stop filter will remove the terms from the resulting synonym expansion.
+
+For example, a synonym rule like `foo, bar => baz` and a stop filter that removes `baz` will get no matches for `foo` or `bar`, as both would get expanded to `baz` which is removed by the stop filter.
+
+If the stop filter removed `foo` instead, then searching for `foo` would get expanded to `baz`, which is not removed by the stop filter thus potentially providing matches for `baz`.
diff --git a/docs/reference/analysis/tokenfilters/synonyms-format.asciidoc b/docs/reference/analysis/tokenfilters/synonyms-format.asciidoc
index 63dd72dade8d0..e780c24963312 100644
--- a/docs/reference/analysis/tokenfilters/synonyms-format.asciidoc
+++ b/docs/reference/analysis/tokenfilters/synonyms-format.asciidoc
@@ -15,7 +15,7 @@ This format uses two different definitions: ipod, i-pod, i pod computer, pc, laptop ----
-* Explicit mappings: Matches a group of words to other words. Words on the left hand side of the rule definition are expanded into all the possibilities described on the right hand side. Example:
+* Explicit synonyms: Matches a group of words to other words.
Words on the left hand side of the rule definition are expanded into all the possibilities described on the right hand side. Example: + [source,synonyms] ---- diff --git a/docs/reference/search/search-your-data/search-with-synonyms.asciidoc b/docs/reference/search/search-your-data/search-with-synonyms.asciidoc index 596af695b7910..61d3a1d8f925b 100644 --- a/docs/reference/search/search-your-data/search-with-synonyms.asciidoc +++ b/docs/reference/search/search-your-data/search-with-synonyms.asciidoc @@ -82,6 +82,19 @@ If an index is created referencing a nonexistent synonyms set, the index will re The only way to recover from this scenario is to ensure the synonyms set exists then either delete and re-create the index, or close and re-open the index. ====== +[WARNING] +==== +Invalid synonym rules can cause errors when applying analyzer changes. +For reloadable analyzers, this prevents reloading and applying changes. +You must correct errors in the synonym rules and reload the analyzer. + +An index with invalid synonym rules cannot be reopened, making it inoperable when: + +* A node containing the index starts +* The index is opened from a closed state +* A node restart occurs (which reopens the node assigned shards) +==== + {es} uses synonyms as part of the <>. You can use two types of <> to include synonyms: diff --git a/docs/reference/synonyms/apis/synonyms-apis.asciidoc b/docs/reference/synonyms/apis/synonyms-apis.asciidoc index c9de52939b2fe..dbbc26c36d3df 100644 --- a/docs/reference/synonyms/apis/synonyms-apis.asciidoc +++ b/docs/reference/synonyms/apis/synonyms-apis.asciidoc @@ -21,6 +21,23 @@ These filters are applied as part of the <> process by the << NOTE: Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonyms sets. +WARNING: Synonyms sets must exist before they can be added to indices. +If an index is created referencing a nonexistent synonyms set, the index will remain in a partially created and inoperable state. +The only way to recover from this scenario is to ensure the synonyms set exists then either delete and re-create the index, or close and re-open the index. + +[WARNING] +==== +Invalid synonym rules can cause errors when applying analyzer changes. +For reloadable analyzers, this prevents reloading and applying changes. +You must correct errors in the synonym rules and reload the analyzer. 
+ +An index with invalid synonym rules cannot be reopened, making it inoperable when: + +* A node containing the index starts +* The index is opened from a closed state +* A node restart occurs (which reopens the node assigned shards) +==== + [discrete] [[synonyms-sets-apis]] === Synonyms sets APIs From d4c7dabde5af62b773011e4c8f20113d34194462 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 18 Jul 2024 12:06:16 +0100 Subject: [PATCH 078/406] Update known-issues for the features upgrade bug #111014 --- docs/reference/release-notes/8.12.0.asciidoc | 7 +++++++ docs/reference/release-notes/8.12.1.asciidoc | 10 ++++++++++ docs/reference/release-notes/8.12.2.asciidoc | 10 ++++++++++ docs/reference/release-notes/8.13.0.asciidoc | 6 +++--- docs/reference/release-notes/8.13.1.asciidoc | 6 +++--- docs/reference/release-notes/8.13.2.asciidoc | 6 +++--- docs/reference/release-notes/8.13.3.asciidoc | 6 +++--- docs/reference/release-notes/8.13.4.asciidoc | 6 +++--- docs/reference/release-notes/8.14.0.asciidoc | 6 +++--- docs/reference/release-notes/8.14.1.asciidoc | 6 +++--- docs/reference/release-notes/8.14.2.asciidoc | 6 +++--- 11 files changed, 51 insertions(+), 24 deletions(-) diff --git a/docs/reference/release-notes/8.12.0.asciidoc b/docs/reference/release-notes/8.12.0.asciidoc index 4c0fc50584b9f..bfa99401f41a2 100644 --- a/docs/reference/release-notes/8.12.0.asciidoc +++ b/docs/reference/release-notes/8.12.0.asciidoc @@ -14,6 +14,13 @@ there are deleted documents in the segments, quantiles may fail to build and pre This issue is fixed in 8.12.1. +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. This issue is fixed in 8.15.0. + [[breaking-8.12.0]] [float] === Breaking changes diff --git a/docs/reference/release-notes/8.12.1.asciidoc b/docs/reference/release-notes/8.12.1.asciidoc index 9aa9a11b3bf02..8ebe5cbac3852 100644 --- a/docs/reference/release-notes/8.12.1.asciidoc +++ b/docs/reference/release-notes/8.12.1.asciidoc @@ -3,6 +3,16 @@ Also see <>. +[[known-issues-8.12.1]] +[float] +=== Known issues +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. This issue is fixed in 8.15.0. + [[bug-8.12.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.12.2.asciidoc b/docs/reference/release-notes/8.12.2.asciidoc index 2be8449b6c1df..44202ee8226eb 100644 --- a/docs/reference/release-notes/8.12.2.asciidoc +++ b/docs/reference/release-notes/8.12.2.asciidoc @@ -3,6 +3,16 @@ Also see <>. 
+[[known-issues-8.12.2]] +[float] +=== Known issues +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. This issue is fixed in 8.15.0. + [[bug-8.12.2]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.13.0.asciidoc b/docs/reference/release-notes/8.13.0.asciidoc index 4bb2913f07be7..65c77ff602e34 100644 --- a/docs/reference/release-notes/8.13.0.asciidoc +++ b/docs/reference/release-notes/8.13.0.asciidoc @@ -21,12 +21,12 @@ This affects clusters running version 8.10 or later, with an active downsampling https://www.elastic.co/guide/en/elasticsearch/reference/current/downsampling-ilm.html[configuration] or a configuration that was activated at some point since upgrading to version 8.10 or later. -* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. +are upgraded. This issue is fixed in 8.15.0. [[breaking-8.13.0]] [float] diff --git a/docs/reference/release-notes/8.13.1.asciidoc b/docs/reference/release-notes/8.13.1.asciidoc index 572f9fe1172a9..c95fb1e720651 100644 --- a/docs/reference/release-notes/8.13.1.asciidoc +++ b/docs/reference/release-notes/8.13.1.asciidoc @@ -6,12 +6,12 @@ Also see <>. [[known-issues-8.13.1]] [float] === Known issues -* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. +are upgraded. This issue is fixed in 8.15.0. [[bug-8.13.1]] [float] diff --git a/docs/reference/release-notes/8.13.2.asciidoc b/docs/reference/release-notes/8.13.2.asciidoc index 20ae7abbb5769..d4e2cc794b7e8 100644 --- a/docs/reference/release-notes/8.13.2.asciidoc +++ b/docs/reference/release-notes/8.13.2.asciidoc @@ -6,12 +6,12 @@ Also see <>. 
[[known-issues-8.13.2]] [float] === Known issues -* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. +are upgraded. This issue is fixed in 8.15.0. [[bug-8.13.2]] [float] diff --git a/docs/reference/release-notes/8.13.3.asciidoc b/docs/reference/release-notes/8.13.3.asciidoc index ea51bd6f9b743..bbad07f36a31e 100644 --- a/docs/reference/release-notes/8.13.3.asciidoc +++ b/docs/reference/release-notes/8.13.3.asciidoc @@ -13,12 +13,12 @@ SQL:: [[known-issues-8.13.3]] [float] === Known issues -* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. +are upgraded. This issue is fixed in 8.15.0. [[bug-8.13.3]] [float] diff --git a/docs/reference/release-notes/8.13.4.asciidoc b/docs/reference/release-notes/8.13.4.asciidoc index b60c9f485bb31..bb2fe5789d56f 100644 --- a/docs/reference/release-notes/8.13.4.asciidoc +++ b/docs/reference/release-notes/8.13.4.asciidoc @@ -6,12 +6,12 @@ Also see <>. [[known-issues-8.13.4]] [float] === Known issues -* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. +are upgraded. This issue is fixed in 8.15.0. 
[[bug-8.13.4]] [float] diff --git a/docs/reference/release-notes/8.14.0.asciidoc b/docs/reference/release-notes/8.14.0.asciidoc index 5b92c49ced70a..034b1ce39be1b 100644 --- a/docs/reference/release-notes/8.14.0.asciidoc +++ b/docs/reference/release-notes/8.14.0.asciidoc @@ -15,12 +15,12 @@ Security:: [[known-issues-8.14.0]] [float] === Known issues -* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. +are upgraded. This issue is fixed in 8.15.0. [[bug-8.14.0]] [float] diff --git a/docs/reference/release-notes/8.14.1.asciidoc b/docs/reference/release-notes/8.14.1.asciidoc index 1cab442eb9ac1..0b5f5b0a4e804 100644 --- a/docs/reference/release-notes/8.14.1.asciidoc +++ b/docs/reference/release-notes/8.14.1.asciidoc @@ -7,12 +7,12 @@ Also see <>. [[known-issues-8.14.1]] [float] === Known issues -* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. +are upgraded. This issue is fixed in 8.15.0. [[bug-8.14.1]] [float] diff --git a/docs/reference/release-notes/8.14.2.asciidoc b/docs/reference/release-notes/8.14.2.asciidoc index 9273355106a03..f52acf0b6a7e8 100644 --- a/docs/reference/release-notes/8.14.2.asciidoc +++ b/docs/reference/release-notes/8.14.2.asciidoc @@ -8,12 +8,12 @@ Also see <>. [[known-issues-8.14.2]] [float] === Known issues -* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, information about the new functionality of these upgraded nodes may not be registered properly with the master node. -This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +This can lead to some new functionality added since 8.12.0 not being accessible on the upgraded cluster. If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes -are upgraded. +are upgraded. This issue is fixed in 8.15.0. 
[[bug-8.14.2]] [float] From 858440dd4bc55f3e7fd2376c90528b5877da070d Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Thu, 18 Jul 2024 13:52:49 +0200 Subject: [PATCH 079/406] [8.15] Inject `host.name` field without relying on (component) templates (#110938) (#111031) * Inject `host.name` field without relying on (component) templates (#110938) We do not want to rely on templates or component templates to include the host.name field in indices using LogsDB. The host.name field is a field we sort on by default when LogsDB is used. As a result, we just inject it by default, the same way we do for the @timestamp field. This prevents sorting errors due to missing host.name field in mappings. The host.name is a keyword field and depending on the value of subobjects it will be mapped as a name keyword nested inside a host or as a flat host.name keyword. We also include ignore_above as we normally do for keywords in observability mappings. * Enable missing hostname test --- .../rest-api-spec/test/logsdb/10_settings.yml | 70 ++++++++++--------- .../org/elasticsearch/index/IndexMode.java | 5 ++ .../main/resources/logs@mappings-logsdb.json | 31 -------- .../xpack/stack/StackTemplateRegistry.java | 2 +- 4 files changed, 42 insertions(+), 66 deletions(-) delete mode 100644 x-pack/plugin/core/template-resources/src/main/resources/logs@mappings-logsdb.json diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml index 4976e5e15adbe..de751e7c5f4df 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml @@ -116,40 +116,42 @@ using default timestamp field mapping: --- missing hostname field: - - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" - - - do: - catch: bad_request - indices.create: - index: test-hostname-missing - body: - settings: - index: - mode: logs - number_of_replicas: 0 - number_of_shards: 2 - mappings: - properties: - "@timestamp": - type: date - agent_id: - type: keyword - process_id: - type: integer - http_method: - type: keyword - message: - type: text - - - match: { error.root_cause.0.type: "illegal_argument_exception" } - - match: { error.type: "illegal_argument_exception" } - - match: { error.reason: "unknown index sort field:[host.name]" } + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ logs_index_mode ] + reason: "Support for 'logs' index mode capability required" + + - do: + indices.create: + index: test-hostname-missing + body: + settings: + index: + mode: logs + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + agent_id: + type: keyword + process_id: + type: integer + http_method: + type: keyword + message: + type: text + + - do: + indices.get_settings: + index: test-hostname-missing + + - is_true: test-hostname-missing + - match: { test-hostname-missing.settings.index.mode: "logs" } --- missing sort field: diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 3df5b3fe288a2..1050455392482 100644 --- 
a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -20,6 +20,7 @@ import org.elasticsearch.index.mapper.DocumentDimensions; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -345,6 +346,10 @@ protected static String tsdbMode() { .startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH) .field("type", DateFieldMapper.CONTENT_TYPE) .endObject() + .startObject("host.name") + .field("type", KeywordFieldMapper.CONTENT_TYPE) + .field("ignore_above", 1024) + .endObject() .endObject() .endObject()) ); diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings-logsdb.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings-logsdb.json deleted file mode 100644 index 167efbd3ffaf5..0000000000000 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@mappings-logsdb.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "template": { - "mappings": { - "date_detection": false, - "properties": { - "@timestamp": { - "type": "date" - }, - "host.name": { - "type": "keyword" - }, - "data_stream.type": { - "type": "constant_keyword", - "value": "logs" - }, - "data_stream.dataset": { - "type": "constant_keyword" - }, - "data_stream.namespace": { - "type": "constant_keyword" - } - } - } - }, - "_meta": { - "description": "default mappings for the logs index template installed by x-pack", - "managed": true - }, - "version": ${xpack.stack.template.version}, - "deprecated": ${xpack.stack.template.deprecated} -} diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java index 648146ccdcc61..7dc1dfb6cf3df 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java @@ -146,7 +146,7 @@ private Map loadComponentTemplateConfigs(boolean logs ), new IndexTemplateConfig( LOGS_MAPPINGS_COMPONENT_TEMPLATE_NAME, - logsDbEnabled ? "/logs@mappings-logsdb.json" : "/logs@mappings.json", + "/logs@mappings.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE, ADDITIONAL_TEMPLATE_VARIABLES From 40f52014aee05d5f6ab9a83710d9b07a8b29d4c4 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 18 Jul 2024 13:36:09 +0100 Subject: [PATCH 080/406] [DOCS] Fix rendering bug (#111025) (#111037) Closes https://github.com/elastic/elasticsearch/issues/111023 --- docs/reference/search/search.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 15985088a6ff7..501d645665a02 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -141,7 +141,7 @@ When unspecified, the pre-filter phase is executed if any of these conditions is - The primary sort of the query targets an indexed field. [[search-preference]] -tag::search-preference[] +// tag::search-preference[] `preference`:: (Optional, string) Nodes and shards used for the search. 
By default, {es} selects from eligible @@ -178,7 +178,7 @@ Any string that does not start with `_`. If the cluster state and selected shards do not change, searches using the same `` value are routed to the same shards in the same order. ==== -end::search-preference[] +// end::search-preference[] [[search-api-query-params-q]] From 8f9d06b24e26a439bd6c86af5d0038a6ef126336 Mon Sep 17 00:00:00 2001 From: Carson Ip Date: Thu, 18 Jul 2024 14:44:34 +0100 Subject: [PATCH 081/406] Update stack monitoring mapping for apm-server metrics (#110568) (#110687) Update stack monitoring template for .monitoring-beats-mb to include latest apm-server monitoring metrics. All stack monitoring apm-server metrics references in kibana should be intact. To avoid breaking stack monitoring UI, although beat.stats.apm_server.server.response.errors.concurrency is unused and is not present in apm-server stats, it is manually kept in the mapping. (cherry picked from commit 2fb6c80df248e01f9de834ca8ead4bd07ff14fa0) --- .../main/resources/monitoring-beats-mb.json | 1535 +++++++++++++---- .../src/main/resources/monitoring-beats.json | 764 +++++--- .../MonitoringTemplateRegistry.java | 2 +- 3 files changed, 1732 insertions(+), 569 deletions(-) diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json index fab8ca451358f..7457dce805eca 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json @@ -1,5 +1,7 @@ { - "index_patterns": [".monitoring-beats-${xpack.stack.monitoring.template.version}-*"], + "index_patterns": [ + ".monitoring-beats-${xpack.stack.monitoring.template.version}-*" + ], "version": ${xpack.stack.monitoring.template.release.version}, "template": { "mappings": { @@ -198,6 +200,9 @@ "ratelimit": { "type": "long" }, + "timeout": { + "type": "long" + }, "toolarge": { "type": "long" }, @@ -212,16 +217,6 @@ } } }, - "request": { - "properties": { - "count": { - "type": "long" - } - } - }, - "unset": { - "type": "long" - }, "valid": { "properties": { "accepted": { @@ -239,151 +234,436 @@ } } } + }, + "unset": { + "type": "long" } } }, - "decoder": { + "agentcfg": { "properties": { - "deflate": { - "properties": { - "content-length": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "gzip": { - "properties": { - "content-length": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "missing-content-length": { + "elasticsearch": { "properties": { - "count": { - "type": "long" - } - } - }, - "reader": { - "properties": { - "count": { - "type": "long" - }, - "size": { - "type": "long" - } - } - }, - "uncompressed": { - "properties": { - "content-length": { - "type": "long" + "cache": { + "properties": { + "entries": { + "properties": { + "count": { + "type": "long" + } + } + }, + "refresh": { + "properties": { + "failures": { + "type": "long" + }, + "successes": { + "type": "long" + } + } + } + } }, - "count": { - "type": "long" + "fetch": { + "properties": { + "es": { + "type": "long" + }, + "fallback": { + "type": "long" + }, + "invalid": { + "type": "long" + }, + "unavailable": { + "type": "long" + } + } } } } } }, - "processor": { + "jaeger": { "properties": { - "error": { + "grpc": { "properties": { - "decoding": { + "collect": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + 
} + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } }, - "frames": { - "type": "long" - }, - "spans": { - "type": "long" - }, - "stacktraces": { - "type": "long" - }, - "transformations": { - "type": "long" - }, - "validation": { + "sampling": { "properties": { - "count": { - "type": "long" + "event": { + "properties": { + "received": { + "properties": { + "count": { + "type": "long" + } + } + } + } }, - "errors": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } } - }, - "metric": { + } + } + }, + "otlp": { + "properties": { + "grpc": { "properties": { - "decoding": { + "logs": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } }, - "transformations": { - "type": "long" + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } + }, + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } }, - "validation": { + "traces": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } } }, - "sourcemap": { + "http": { "properties": { - "counter": { - "type": "long" + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } }, - "decoding": { + "metrics": { "properties": { - "count": { - "type": "long" + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "request": 
{ + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } }, - "validation": { + "traces": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } } + } + } + }, + "processor": { + "properties": { + "error": { + "properties": { + "transformations": { + "type": "long" + } + } + }, + "metric": { + "properties": { + "transformations": { + "type": "long" + } + } }, "span": { "properties": { @@ -392,60 +672,127 @@ } } }, - "transaction": { + "stream": { "properties": { - "decoding": { + "accepted": { + "type": "long" + }, + "errors": { "properties": { - "count": { + "invalid": { "type": "long" }, - "errors": { + "toolarge": { "type": "long" } } - }, - "frames": { - "type": "long" - }, - "spans": { + } + } + }, + "transaction": { + "properties": { + "transformations": { "type": "long" - }, - "stacktraces": { + } + } + } + } + }, + "root": { + "properties": { + "request": { + "properties": { + "count": { "type": "long" - }, - "transactions": { + } + } + }, + "response": { + "properties": { + "count": { "type": "long" }, - "transformations": { - "type": "long" + "errors": { + "properties": { + "closed": { + "type": "long" + }, + "count": { + "type": "long" + }, + "decode": { + "type": "long" + }, + "forbidden": { + "type": "long" + }, + "internal": { + "type": "long" + }, + "invalidquery": { + "type": "long" + }, + "method": { + "type": "long" + }, + "notfound": { + "type": "long" + }, + "queue": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "unauthorized": { + "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { + "type": "long" + } + } }, - "validation": { + "valid": { "properties": { + "accepted": { + "type": "long" + }, "count": { "type": "long" }, - "errors": { + "notmodified": { + "type": "long" + }, + "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" + } + } + }, + "sampling": { + "properties": { + "transactions_dropped": { + "type": "long" } } }, "server": { "properties": { - "concurrent": { - "properties": { - "wait": { - "properties": { - "ms": { - "type": "long" - } - } - } - } - }, "request": { "properties": { "count": { @@ -478,21 +825,33 @@ "internal": { "type": "long" }, + "invalidquery": { + "type": "long" + }, "method": { "type": "long" }, + "notfound": { + "type": "long" + }, "queue": { "type": "long" }, "ratelimit": { "type": "long" }, + "timeout": { + "type": "long" + }, "toolarge": { "type": "long" }, "unauthorized": { "type": "long" }, + "unavailable": { + "type": "long" + }, "validate": { "type": "long" } @@ -506,12 +865,18 @@ "count": { "type": "long" }, + "notmodified": { + "type": "long" + }, "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" } } } @@ 
-918,6 +1283,37 @@ "type": "long" } } + }, + "output": { + "properties": { + "elasticsearch": { + "properties": { + "bulk_requests": { + "properties": { + "available": { + "type": "long" + }, + "completed": { + "type": "long" + } + } + }, + "indexers": { + "properties": { + "active": { + "type": "long" + }, + "created": { + "type": "long" + }, + "destroyed": { + "type": "long" + } + } + } + } + } + } } } }, @@ -1135,6 +1531,10 @@ "type": "alias", "path": "beat.stats.apm_server.acm.response.errors.ratelimit" }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.timeout" + }, "toolarge": { "type": "alias", "path": "beat.stats.apm_server.acm.response.errors.toolarge" @@ -1153,18 +1553,6 @@ } } }, - "request": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.request.count" - } - } - }, - "unset": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.unset" - }, "valid": { "properties": { "accepted": { @@ -1179,9 +1567,485 @@ "type": "alias", "path": "beat.stats.apm_server.acm.response.valid.notmodified" }, - "ok": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.valid.ok" + "ok": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.valid.ok" + } + } + } + } + }, + "unset": { + "type": "alias", + "path": "beat.stats.apm_server.acm.unset" + } + } + }, + "agentcfg": { + "properties": { + "elasticsearch": { + "properties": { + "cache": { + "properties": { + "entries": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.cache.entries.count" + } + } + }, + "refresh": { + "properties": { + "failures": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.cache.refresh.failures" + }, + "successes": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.cache.refresh.successes" + } + } + } + } + }, + "fetch": { + "properties": { + "es": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.es" + }, + "fallback": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.fallback" + }, + "invalid": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.invalid" + }, + "unavailable": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.unavailable" + } + } + } + } + } + } + }, + "jaeger": { + "properties": { + "grpc": { + "properties": { + "collect": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.valid.count" + } + } + } + } + } + } + }, + "sampling": { + "properties": { + "event": { 
+ "properties": { + "received": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.event.received.count" + } + } + } + } + }, + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.response.errors.count" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.response.valid.count" + } + } + } + } + } + } + } + } + } + } + }, + "otlp": { + "properties": { + "grpc": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.valid.count" + } + } + } + } + } + } + }, + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.consumer.unsupported_dropped" + } + } + }, + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.valid.count" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": 
"beat.stats.apm_server.otlp.grpc.traces.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.valid.count" + } + } + } + } + } + } + } + } + }, + "http": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.valid.count" + } + } + } + } + } + } + }, + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.consumer.unsupported_dropped" + } + } + }, + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.valid.count" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": 
"beat.stats.apm_server.otlp.http.traces.response.valid.count" + } + } + } + } } } } @@ -1189,248 +2053,180 @@ } } }, - "decoder": { + "processor": { "properties": { - "deflate": { + "error": { "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.deflate.content-length" - }, - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.deflate.count" + "path": "beat.stats.apm_server.processor.error.transformations" } } }, - "gzip": { + "metric": { "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.gzip.content-length" - }, - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.gzip.count" + "path": "beat.stats.apm_server.processor.metric.transformations" } } }, - "missing-content-length": { + "span": { "properties": { - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.missing-content-length.count" + "path": "beat.stats.apm_server.processor.span.transformations" } } }, - "reader": { + "stream": { "properties": { - "count": { + "accepted": { "type": "alias", - "path": "beat.stats.apm_server.decoder.reader.count" + "path": "beat.stats.apm_server.processor.stream.accepted" }, - "size": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.reader.size" + "errors": { + "properties": { + "invalid": { + "type": "alias", + "path": "beat.stats.apm_server.processor.stream.errors.invalid" + }, + "toolarge": { + "type": "alias", + "path": "beat.stats.apm_server.processor.stream.errors.toolarge" + } + } } } }, - "uncompressed": { + "transaction": { "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.uncompressed.content-length" - }, - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.uncompressed.count" + "path": "beat.stats.apm_server.processor.transaction.transformations" } } } } }, - "processor": { + "root": { "properties": { - "error": { + "request": { "properties": { - "decoding": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.decoding.count" - }, - "errors": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.decoding.errors" - } - } - }, - "frames": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.frames" - }, - "spans": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.spans" - }, - "stacktraces": { + "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.stacktraces" - }, - "transformations": { + "path": "beat.stats.apm_server.root.request.count" + } + } + }, + "response": { + "properties": { + "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.transformations" + "path": "beat.stats.apm_server.root.response.count" }, - "validation": { + "errors": { "properties": { + "closed": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.errors.closed" + }, "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.validation.count" + "path": "beat.stats.apm_server.root.response.errors.count" }, - "errors": { + "decode": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.validation.errors" - } - } - } - } - }, - "metric": { - "properties": { - "decoding": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.decode" + }, + "forbidden": { "type": "alias", - "path": 
"beat.stats.apm_server.processor.metric.decoding.count" + "path": "beat.stats.apm_server.root.response.errors.forbidden" }, - "errors": { + "internal": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.decoding.errors" - } - } - }, - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.metric.transformations" - }, - "validation": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.internal" + }, + "invalidquery": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.validation.count" + "path": "beat.stats.apm_server.root.response.errors.invalidquery" }, - "errors": { + "method": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.validation.errors" - } - } - } - } - }, - "sourcemap": { - "properties": { - "counter": { - "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.counter" - }, - "decoding": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.method" + }, + "notfound": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.decoding.count" + "path": "beat.stats.apm_server.root.response.errors.notfound" }, - "errors": { + "queue": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.decoding.errors" - } - } - }, - "validation": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.queue" + }, + "ratelimit": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.validation.count" + "path": "beat.stats.apm_server.root.response.errors.ratelimit" }, - "errors": { + "timeout": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.validation.errors" - } - } - } - } - }, - "span": { - "properties": { - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.span.transformations" - } - } - }, - "transaction": { - "properties": { - "decoding": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.timeout" + }, + "toolarge": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.errors.toolarge" + }, + "unauthorized": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.decoding.count" + "path": "beat.stats.apm_server.root.response.errors.unauthorized" }, - "errors": { + "unavailable": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.errors.unavailable" + }, + "validate": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.decoding.errors" + "path": "beat.stats.apm_server.root.response.errors.validate" } } }, - "frames": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.frames" - }, - "spans": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.spans" - }, - "stacktraces": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.stacktraces" - }, - "transactions": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.transactions" - }, - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.transformations" - }, - "validation": { + "valid": { "properties": { + "accepted": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.valid.accepted" + }, "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.validation.count" + "path": "beat.stats.apm_server.root.response.valid.count" }, - "errors": { + "notmodified": { + "type": 
"alias", + "path": "beat.stats.apm_server.root.response.valid.notmodified" + }, + "ok": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.validation.errors" + "path": "beat.stats.apm_server.root.response.valid.ok" } } } } + }, + "unset": { + "type": "alias", + "path": "beat.stats.apm_server.root.unset" + } + } + }, + "sampling": { + "properties": { + "transactions_dropped": { + "type": "alias", + "path": "beat.stats.apm_server.sampling.transactions_dropped" } } }, "server": { "properties": { - "concurrent": { - "properties": { - "wait": { - "properties": { - "ms": { - "type": "alias", - "path": "beat.stats.apm_server.server.concurrent.wait.ms" - } - } - } - } - }, "request": { "properties": { "count": { @@ -1471,10 +2267,18 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.errors.internal" }, + "invalidquery": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.invalidquery" + }, "method": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.method" }, + "notfound": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.notfound" + }, "queue": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.queue" @@ -1483,6 +2287,10 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.errors.ratelimit" }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.timeout" + }, "toolarge": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.toolarge" @@ -1491,6 +2299,10 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.errors.unauthorized" }, + "unavailable": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.unavailable" + }, "validate": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.validate" @@ -1507,6 +2319,10 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.valid.count" }, + "notmodified": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.valid.notmodified" + }, "ok": { "type": "alias", "path": "beat.stats.apm_server.server.response.valid.ok" @@ -1514,49 +2330,10 @@ } } } - } - } - }, - "sampling": { - "properties": { - "transactions_dropped": { - "type": "long" }, - "tail": { - "properties": { - "dynamic_service_groups": { - "type": "long" - }, - "storage": { - "properties": { - "lsm_size": { - "type": "long" - }, - "value_log_size": { - "type": "long" - } - } - }, - "events": { - "properties": { - "processed": { - "type": "long" - }, - "dropped": { - "type": "long" - }, - "stored": { - "type": "long" - }, - "sampled": { - "type": "long" - }, - "head_unsampled": { - "type": "long" - } - } - } - } + "unset": { + "type": "alias", + "path": "beat.stats.apm_server.server.unset" } } } @@ -1985,6 +2762,42 @@ } } } + }, + "output": { + "properties": { + "elasticsearch": { + "properties": { + "bulk_requests": { + "properties": { + "available": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.bulk_requests.available" + }, + "completed": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.bulk_requests.completed" + } + } + }, + "indexers": { + "properties": { + "active": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.indexers.active" + }, + "created": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.indexers.created" + }, + "destroyed": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.indexers.destroyed" + } + } + } + } + } + } } } }, 
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json index 6dee05564cc10..d699317c29da3 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json @@ -346,17 +346,11 @@ "response": { "properties": { "count": { - "type": "long" + "type": "long" }, "errors": { "properties": { - "validate": { - "type": "long" - }, - "internal": { - "type": "long" - }, - "queue": { + "closed": { "type": "long" }, "count": { @@ -365,13 +359,13 @@ "decode": { "type": "long" }, - "toolarge": { + "forbidden": { "type": "long" }, - "unavailable": { + "internal": { "type": "long" }, - "forbidden": { + "invalidquery": { "type": "long" }, "method": { @@ -380,125 +374,454 @@ "notfound": { "type": "long" }, - "invalidquery": { + "queue": { "type": "long" }, "ratelimit": { "type": "long" }, - "closed": { + "timeout": { + "type": "long" + }, + "toolarge": { "type": "long" }, "unauthorized": { "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { + "type": "long" } } }, "valid": { "properties": { - "notmodified": { + "accepted": { "type": "long" }, "count": { "type": "long" }, - "ok": { + "notmodified": { "type": "long" }, - "accepted": { - "type": "long" - } - } - }, - "unset": { - "type": "long" - }, - "request": { - "properties": { - "count": { + "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" } } }, - "server": { + "agentcfg": { "properties": { - "request": { + "elasticsearch": { "properties": { - "count": { - "type": "long" - } - } - }, - "concurrent": { - "properties": { - "wait": { + "cache": { "properties": { - "ms": { - "type": "long" + "entries": { + "properties": { + "count": { + "type": "long" + } + } + }, + "refresh": { + "properties": { + "failures": { + "type": "long" + }, + "successes": { + "type": "long" + } + } } } - } - } - }, - "response": { - "properties": { - "count": { - "type": "long" }, - "errors": { + "fetch": { "properties": { - "count": { + "es": { "type": "long" }, - "toolarge": { + "fallback": { "type": "long" }, - "validate": { + "invalid": { "type": "long" }, - "ratelimit": { + "unavailable": { "type": "long" + } + } + } + } + } + } + }, + "jaeger": { + "properties": { + "grpc": { + "properties": { + "collect": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "queue": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "sampling": { + "properties": { + "event": { + "properties": { + "received": { + "properties": { + "count": { + "type": "long" + } + } + } + } }, - "closed": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "forbidden": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + } + } + } + } + }, + "otlp": { + "properties": { + "grpc": { + "properties": { + "logs": { + "properties": { + "request": { + 
"properties": { + "count": { + "type": "long" + } + } }, - "concurrency": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } }, - "unauthorized": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "internal": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "decode": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "http": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "method": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } }, - "valid": { + "metrics": { "properties": { - "ok": { - "type": "long" + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } }, - "accepted": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "count": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } @@ -506,195 +829,138 @@ } } }, - "decoder": { + "processor": { "properties": { - "deflate": { + "error": { "properties": { - "content-length": { - "type": "long" - }, - "count": { + "transformations": { "type": "long" } } }, - "gzip": { + "metric": { "properties": { - "content-length": { - "type": "long" - }, - "count": { + "transformations": { "type": "long" } } }, - 
"uncompressed": { + "span": { "properties": { - "content-length": { - "type": "long" - }, - "count": { + "transformations": { "type": "long" } } }, - "reader": { + "stream": { "properties": { - "size": { + "accepted": { "type": "long" }, - "count": { - "type": "long" + "errors": { + "properties": { + "invalid": { + "type": "long" + }, + "toolarge": { + "type": "long" + } + } } } }, - "missing-content-length": { + "transaction": { "properties": { - "count": { + "transformations": { "type": "long" } } } } - }, - "processor": { + "root": { "properties": { - "metric": { + "request": { "properties": { - "decoding": { - "properties": { - "errors": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "validation": { - "properties": { - "errors": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "transformations": { + "count": { "type": "long" } } }, - "sourcemap": { + "response": { "properties": { - "counter": { + "count": { "type": "long" }, - "decoding": { + "errors": { "properties": { - "errors": { + "closed": { "type": "long" }, "count": { "type": "long" - } - } - }, - "validation": { - "properties": { - "errors": { + }, + "decode": { "type": "long" }, - "count": { + "forbidden": { "type": "long" - } - } - } - } - }, - "transaction": { - "properties": { - "decoding": { - "properties": { - "errors": { + }, + "internal": { "type": "long" }, - "count": { + "invalidquery": { "type": "long" - } - } - }, - "validation": { - "properties": { - "errors": { + }, + "method": { "type": "long" }, - "count": { + "notfound": { "type": "long" - } - } - }, - "transformations": { - "type": "long" - }, - "transactions": { - "type": "long" - }, - "spans": { - "type": "long" - }, - "stacktraces": { - "type": "long" - }, - "frames": { - "type": "long" - } - } - }, - "error": { - "properties": { - "decoding": { - "properties": { - "errors": { + }, + "queue": { "type": "long" }, - "count": { + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "unauthorized": { + "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { "type": "long" } } }, - "validation": { + "valid": { "properties": { - "errors": { + "accepted": { "type": "long" }, "count": { "type": "long" + }, + "notmodified": { + "type": "long" + }, + "ok": { + "type": "long" } } - }, - "transformations": { - "type": "long" - }, - "errors": { - "type": "long" - }, - "stacktraces": { - "type": "long" - }, - "frames": { - "type": "long" } } }, - "span": { - "properties": { - "transformations": { - "type": "long" - } - } + "unset": { + "type": "long" } } }, @@ -702,42 +968,95 @@ "properties": { "transactions_dropped": { "type": "long" + } + } + }, + "server": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "tail": { + "response": { "properties": { - "dynamic_service_groups": { + "count": { "type": "long" }, - "storage": { + "errors": { "properties": { - "lsm_size": { + "closed": { "type": "long" }, - "value_log_size": { + "concurrency": { + "type": "long" + }, + "count": { + "type": "long" + }, + "decode": { + "type": "long" + }, + "forbidden": { + "type": "long" + }, + "internal": { + "type": "long" + }, + "invalidquery": { + "type": "long" + }, + "method": { + "type": "long" + }, + "notfound": { + "type": "long" + }, + "queue": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "unauthorized": { + "type": "long" 
+ }, + "unavailable": { + "type": "long" + }, + "validate": { "type": "long" } } }, - "events": { + "valid": { "properties": { - "processed": { - "type": "long" - }, - "dropped": { + "accepted": { "type": "long" }, - "stored": { + "count": { "type": "long" }, - "sampled": { + "notmodified": { "type": "long" }, - "head_unsampled": { + "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" } } } @@ -893,6 +1212,37 @@ } } } + }, + "output": { + "properties": { + "elasticsearch": { + "properties": { + "bulk_requests": { + "properties": { + "available": { + "type": "long" + }, + "completed": { + "type": "long" + } + } + }, + "indexers": { + "properties": { + "active": { + "type": "long" + }, + "created": { + "type": "long" + }, + "destroyed": { + "type": "long" + } + } + } + } + } + } } } }, diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java index 12eeaf8732235..e0433ea6fdd71 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry { * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring * mappings that point to the corresponding ECS fields. */ - public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 17; + public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 18; private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version"; private static final String STACK_TEMPLATE_VERSION = "8"; private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version"; From b186ee92acefca9d35f3330718b10e0faa6ccfde Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 18 Jul 2024 17:03:19 +0200 Subject: [PATCH 082/406] Make skip_unavailable=true a notable change instead of breaking change (#110983) Co-authored-by: Najwa Harif <90753689+naj-h@users.noreply.github.com> --- docs/changelog/105792.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/changelog/105792.yaml b/docs/changelog/105792.yaml index 2ad5aa970c214..b9190e60cc96d 100644 --- a/docs/changelog/105792.yaml +++ b/docs/changelog/105792.yaml @@ -15,4 +15,4 @@ breaking: as SKIPPED in the search response metadata section and do not fail the entire search. If users want to ensure that a search returns a failure when a particular remote cluster is not available, `skip_unavailable` must be now be set explicitly. 
- notable: false + notable: true From 5846c231f1c29cfe1dd6f79ca8a3c734bca7e71b Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Thu, 18 Jul 2024 17:18:54 +0200 Subject: [PATCH 083/406] Speed up collecting zero document string terms (#110922) (#111057) Use segment ordinals when possible to collect zero document buckets --- docs/changelog/110922.yaml | 5 ++ .../terms/MapStringTermsAggregator.java | 68 +++++++++++++++---- .../bucket/terms/TermsAggregatorTests.java | 59 ++++++++++++++++ .../DocumentLevelSecurityTests.java | 4 ++ 4 files changed, 124 insertions(+), 12 deletions(-) create mode 100644 docs/changelog/110922.yaml diff --git a/docs/changelog/110922.yaml b/docs/changelog/110922.yaml new file mode 100644 index 0000000000000..6a85ce57de103 --- /dev/null +++ b/docs/changelog/110922.yaml @@ -0,0 +1,5 @@ +pr: 110922 +summary: Speed up collecting zero document string terms +area: Aggregations +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java index 9cea884667325..936fcf2edc225 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java @@ -9,7 +9,10 @@ import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.PriorityQueue; @@ -419,25 +422,66 @@ void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedD } // we need to fill-in the blanks for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) { - SortedBinaryDocValues values = valuesSource.bytesValues(ctx); - // brute force - for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) { - if (excludeDeletedDocs && ctx.reader().getLiveDocs() != null && ctx.reader().getLiveDocs().get(docId) == false) { - continue; + final Bits liveDocs = excludeDeletedDocs ? 
ctx.reader().getLiveDocs() : null; + if (liveDocs == null && valuesSource.hasOrdinals()) { + final SortedSetDocValues values = ((ValuesSource.Bytes.WithOrdinals) valuesSource).ordinalsValues(ctx); + collectZeroDocEntries(values, owningBucketOrd); + } else { + final SortedBinaryDocValues values = valuesSource.bytesValues(ctx); + final BinaryDocValues singleton = FieldData.unwrapSingleton(values); + if (singleton != null) { + collectZeroDocEntries(singleton, liveDocs, ctx.reader().maxDoc(), owningBucketOrd); + } else { + collectZeroDocEntries(values, liveDocs, ctx.reader().maxDoc(), owningBucketOrd); } - if (values.advanceExact(docId)) { - int valueCount = values.docValueCount(); - for (int i = 0; i < valueCount; ++i) { - BytesRef term = values.nextValue(); - if (includeExclude == null || includeExclude.accept(term)) { - bucketOrds.add(owningBucketOrd, term); - } + } + } + } + + private void collectZeroDocEntries(SortedSetDocValues values, long owningBucketOrd) throws IOException { + final TermsEnum termsEnum = values.termsEnum(); + BytesRef term; + while ((term = termsEnum.next()) != null) { + if (includeExclude == null || includeExclude.accept(term)) { + bucketOrds.add(owningBucketOrd, term); + } + } + } + + private void collectZeroDocEntries(SortedBinaryDocValues values, Bits liveDocs, int maxDoc, long owningBucketOrd) + throws IOException { + // brute force + for (int docId = 0; docId < maxDoc; ++docId) { + if (liveDocs != null && liveDocs.get(docId) == false) { + continue; + } + if (values.advanceExact(docId)) { + final int valueCount = values.docValueCount(); + for (int i = 0; i < valueCount; ++i) { + final BytesRef term = values.nextValue(); + if (includeExclude == null || includeExclude.accept(term)) { + bucketOrds.add(owningBucketOrd, term); } } } } } + private void collectZeroDocEntries(BinaryDocValues values, Bits liveDocs, int maxDoc, long owningBucketOrd) throws IOException { + // brute force + for (int docId = 0; docId < maxDoc; ++docId) { + if (liveDocs != null && liveDocs.get(docId) == false) { + continue; + } + if (values.advanceExact(docId)) { + final BytesRef term = values.binaryValue(); + if (includeExclude == null || includeExclude.accept(term)) { + bucketOrds.add(owningBucketOrd, term); + } + } + } + } + @Override Supplier emptyBucketBuilder(long owningBucketOrd) { return () -> new StringTerms.Bucket(new BytesRef(), 0, null, showTermDocCountError, 0, format); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 788249fee1187..27f0b21d2767f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -329,6 +329,65 @@ public void testStringShardMinDocCount() throws IOException { } } + public void testStringShardZeroMinDocCount() throws IOException { + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string", true, true, Collections.emptyMap()); + for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) { + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").field("string") + .executionHint(executionMode.toString()) + .size(2) + .minDocCount(0) + .executionHint("map") + .excludeDeletedDocs(true) + .order(BucketOrder.key(true)); + + { + boolean delete = 
randomBoolean(); + // force single shard/segment + testCase(iw -> { + // force single shard/segment + iw.addDocuments(Arrays.asList(doc(fieldType, "a"), doc(fieldType, "b"), doc(fieldType, "c"), doc(fieldType, "d"))); + if (delete) { + iw.deleteDocuments(new TermQuery(new Term("string", "b"))); + } + }, (InternalTerms result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("a", result.getBuckets().get(0).getKeyAsString()); + assertEquals(0L, result.getBuckets().get(0).getDocCount()); + if (delete) { + assertEquals("c", result.getBuckets().get(1).getKeyAsString()); + } else { + assertEquals("b", result.getBuckets().get(1).getKeyAsString()); + } + assertEquals(0L, result.getBuckets().get(1).getDocCount()); + }, new AggTestConfig(aggregationBuilder, fieldType).withQuery(new TermQuery(new Term("string", "e")))); + } + + { + boolean delete = randomBoolean(); + // force single shard/segment + testCase(iw -> { + // force single shard/segment + iw.addDocuments( + Arrays.asList(doc(fieldType, "a"), doc(fieldType, "c", "d"), doc(fieldType, "b", "d"), doc(fieldType, "b")) + ); + if (delete) { + iw.deleteDocuments(new TermQuery(new Term("string", "b"))); + } + }, (InternalTerms result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("a", result.getBuckets().get(0).getKeyAsString()); + assertEquals(0L, result.getBuckets().get(0).getDocCount()); + if (delete) { + assertEquals("c", result.getBuckets().get(1).getKeyAsString()); + } else { + assertEquals("b", result.getBuckets().get(1).getKeyAsString()); + } + assertEquals(0L, result.getBuckets().get(1).getDocCount()); + }, new AggTestConfig(aggregationBuilder, fieldType).withQuery(new TermQuery(new Term("string", "e")))); + } + } + } + public void testManyTerms() throws Exception { MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string", randomBoolean(), true, Collections.emptyMap()); TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").executionHint(randomHint()).field("string"); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index 704d8b75d9ed3..c0866fa7ea694 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -1013,6 +1013,10 @@ public void testZeroMinDocAggregation() throws Exception { prepareIndex("test").setId("2").setSource("color", "yellow", "fruit", "banana", "count", -2).setRefreshPolicy(IMMEDIATE).get(); prepareIndex("test").setId("3").setSource("color", "green", "fruit", "grape", "count", -3).setRefreshPolicy(IMMEDIATE).get(); prepareIndex("test").setId("4").setSource("color", "red", "fruit", "grape", "count", -4).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("5") + .setSource("color", new String[] { "green", "black" }, "fruit", "grape", "count", -5) + .setRefreshPolicy(IMMEDIATE) + .get(); indicesAdmin().prepareForceMerge("test").get(); assertResponse( From ff9a8eb705f68ebe639d7a3bf8edbde678dcdf0a Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Thu, 18 Jul 2024 20:19:11 -0400 Subject: [PATCH 084/406] [8.15] Directly download commercial ip geolocation databases from providers (#110844) (#111077) Co-authored-by: Keith Massey --- docs/changelog/110844.yaml 
| 5 + .../authorization/privileges.asciidoc | 2 +- .../geoip/EnterpriseGeoIpDownloaderIT.java | 194 +++++++ .../ingest/geoip/GeoIpDownloaderIT.java | 30 +- .../src/main/java/module-info.java | 1 + .../ingest/geoip/DatabaseNodeService.java | 69 ++- .../geoip/EnterpriseGeoIpDownloader.java | 474 +++++++++++++++ ...EnterpriseGeoIpDownloaderTaskExecutor.java | 257 +++++++++ .../geoip/EnterpriseGeoIpTaskState.java | 153 +++++ .../ingest/geoip/GeoIpDownloader.java | 10 +- .../geoip/GeoIpDownloaderTaskExecutor.java | 2 +- .../ingest/geoip/GeoIpTaskState.java | 38 +- .../ingest/geoip/HttpClient.java | 26 + .../ingest/geoip/IngestGeoIpMetadata.java | 157 +++++ .../ingest/geoip/IngestGeoIpPlugin.java | 75 ++- .../geoip/direct/DatabaseConfiguration.java | 209 +++++++ .../direct/DatabaseConfigurationMetadata.java | 84 +++ .../DeleteDatabaseConfigurationAction.java | 70 +++ .../GetDatabaseConfigurationAction.java | 142 +++++ .../PutDatabaseConfigurationAction.java | 87 +++ ...RestDeleteDatabaseConfigurationAction.java | 46 ++ .../RestGetDatabaseConfigurationAction.java | 47 ++ .../RestPutDatabaseConfigurationAction.java | 52 ++ ...portDeleteDatabaseConfigurationAction.java | 128 +++++ ...ansportGetDatabaseConfigurationAction.java | 109 ++++ ...ansportPutDatabaseConfigurationAction.java | 178 ++++++ .../geoip/EnterpriseGeoIpDownloaderTests.java | 538 ++++++++++++++++++ ...priseGeoIpTaskStateSerializationTests.java | 72 +++ .../ingest/geoip/GeoIpDownloaderTests.java | 49 ++ .../geoip/IngestGeoIpMetadataTests.java | 91 +++ .../DatabaseConfigurationMetadataTests.java | 74 +++ .../direct/DatabaseConfigurationTests.java | 86 +++ ...rtPutDatabaseConfigurationActionTests.java | 69 +++ .../IngestGeoIpClientYamlTestSuiteIT.java | 5 + .../test/ingest_geoip/40_geoip_databases.yml | 72 +++ .../api/ingest.delete_geoip_database.json | 31 + .../api/ingest.get_geoip_database.json | 37 ++ .../api/ingest.put_geoip_database.json | 35 ++ server/src/main/java/module-info.java | 2 + .../org/elasticsearch/TransportVersions.java | 1 + .../ingest/EnterpriseGeoIpTask.java | 86 +++ .../ingest/IngestGeoIpFeatures.java | 22 + ...lasticsearch.features.FeatureSpecification | 1 + .../ingest/IngestServiceTests.java | 6 +- .../geoip/EnterpriseGeoIpHttpFixture.java | 125 ++++ .../resources/geoip-fixture/GeoIP2-City.tgz | Bin 0 -> 6377 bytes .../elasticsearch/xpack/core/XPackField.java | 1 + .../geoip-enterprise-downloader/build.gradle | 19 + .../geoip/EnterpriseDownloaderPlugin.java | 48 ++ ...erpriseGeoIpDownloaderLicenseListener.java | 145 +++++ ...seGeoIpDownloaderLicenseListenerTests.java | 219 +++++++ .../xpack/security/operator/Constants.java | 3 + 52 files changed, 4428 insertions(+), 54 deletions(-) create mode 100644 docs/changelog/110844.yaml create mode 100644 modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTaskExecutor.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java create mode 100644 
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadata.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DeleteDatabaseConfigurationAction.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/PutDatabaseConfigurationAction.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestDeleteDatabaseConfigurationAction.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestGetDatabaseConfigurationAction.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestPutDatabaseConfigurationAction.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java create mode 100644 modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java create mode 100644 modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java create mode 100644 modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskStateSerializationTests.java create mode 100644 modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadataTests.java create mode 100644 modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadataTests.java create mode 100644 modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationTests.java create mode 100644 modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationActionTests.java create mode 100644 modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_geoip_database.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json create mode 100644 server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java create mode 100644 server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java create mode 100644 test/fixtures/geoip-fixture/src/main/java/fixture/geoip/EnterpriseGeoIpHttpFixture.java create mode 100644 test/fixtures/geoip-fixture/src/main/resources/geoip-fixture/GeoIP2-City.tgz create mode 100644 x-pack/plugin/geoip-enterprise-downloader/build.gradle create mode 100644 x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseDownloaderPlugin.java create mode 100644 x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListener.java create mode 100644 x-pack/plugin/geoip-enterprise-downloader/src/test/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListenerTests.java diff --git a/docs/changelog/110844.yaml b/docs/changelog/110844.yaml new file mode 100644 index 0000000000000..ea879f13f3e67 --- /dev/null +++ 
b/docs/changelog/110844.yaml @@ -0,0 +1,5 @@ +pr: 110844 +summary: Directly download commercial ip geolocation databases from providers +area: Ingest Node +type: feature +issues: [] diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index 44897baa8cb4a..145bd8ebc06bb 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -282,7 +282,7 @@ status of {Ilm} This privilege is not available in {serverless-full}. `read_pipeline`:: -Read-only access to ingest pipline (get, simulate). +Read-only access to ingest pipeline (get, simulate). `read_slm`:: All read-only {slm-init} actions, such as getting policies and checking the diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java new file mode 100644 index 0000000000000..d9665e180d960 --- /dev/null +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java @@ -0,0 +1,194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip; + +import fixture.geoip.EnterpriseGeoIpHttpFixture; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.ingest.EnterpriseGeoIpTask; +import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration; +import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; + +import static org.elasticsearch.ingest.EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER; +import static 
org.elasticsearch.ingest.geoip.EnterpriseGeoIpDownloaderTaskExecutor.MAXMIND_LICENSE_KEY_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +public class EnterpriseGeoIpDownloaderIT extends ESIntegTestCase { + + private static final String DATABASE_TYPE = "GeoIP2-City"; + private static final boolean useFixture = Booleans.parseBoolean(System.getProperty("geoip_use_service", "false")) == false; + + @ClassRule + public static final EnterpriseGeoIpHttpFixture fixture = new EnterpriseGeoIpHttpFixture(useFixture, DATABASE_TYPE); + + protected String getEndpoint() { + return useFixture ? fixture.getAddress() : null; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(MAXMIND_LICENSE_KEY_SETTING.getKey(), "license_key"); + Settings.Builder builder = Settings.builder(); + builder.setSecureSettings(secureSettings) + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true); + if (getEndpoint() != null) { + // note: this is using the enterprise fixture for the regular downloader, too, as + // a slightly hacky way of making the regular downloader not actually download any files + builder.put(GeoIpDownloader.ENDPOINT_SETTING.getKey(), getEndpoint()); + } + return builder.build(); + } + + @SuppressWarnings("unchecked") + protected Collection> nodePlugins() { + // the reindex plugin is (somewhat surprisingly) necessary in order to be able to delete-by-query, + // which modules/ingest-geoip does to delete old chunks + return CollectionUtils.appendToCopyNoNullElements(super.nodePlugins(), IngestGeoIpPlugin.class, ReindexPlugin.class); + } + + @SuppressWarnings("unchecked") + public void testEnterpriseDownloaderTask() throws Exception { + /* + * This test starts the enterprise geoip downloader task, and creates a database configuration. Then it creates an ingest + * pipeline that references that database, and ingests a single document using that pipeline. It then asserts that the document + * was updated with information from the database. + * Note that the "enterprise database" is actually just a geolite database being loaded by the GeoIpHttpFixture. 
+ */ + if (getEndpoint() != null) { + EnterpriseGeoIpDownloader.DEFAULT_MAXMIND_ENDPOINT = getEndpoint(); + } + final String pipelineName = "enterprise_geoip_pipeline"; + final String indexName = "enterprise_geoip_test_index"; + final String sourceField = "ip"; + final String targetField = "ip-city"; + + startEnterpriseGeoIpDownloaderTask(); + configureDatabase(DATABASE_TYPE); + createGeoIpPipeline(pipelineName, DATABASE_TYPE, sourceField, targetField); + String documentId = ingestDocument(indexName, pipelineName, sourceField); + GetResponse getResponse = client().get(new GetRequest(indexName, documentId)).actionGet(); + Map returnedSource = getResponse.getSource(); + assertNotNull(returnedSource); + Object targetFieldValue = returnedSource.get(targetField); + assertNotNull(targetFieldValue); + assertThat(((Map) targetFieldValue).get("organization_name"), equalTo("Bredband2 AB")); + } + + private void startEnterpriseGeoIpDownloaderTask() { + PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class); + persistentTasksService.sendStartRequest( + ENTERPRISE_GEOIP_DOWNLOADER, + ENTERPRISE_GEOIP_DOWNLOADER, + new EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams(), + TimeValue.MAX_VALUE, + ActionListener.wrap(r -> logger.debug("Started enterprise geoip downloader task"), e -> { + Throwable t = e instanceof RemoteTransportException ? ExceptionsHelper.unwrapCause(e) : e; + if (t instanceof ResourceAlreadyExistsException == false) { + logger.error("failed to create enterprise geoip downloader task", e); + } + }) + ); + } + + private void configureDatabase(String databaseType) throws Exception { + admin().cluster() + .execute( + PutDatabaseConfigurationAction.INSTANCE, + new PutDatabaseConfigurationAction.Request( + TimeValue.MAX_VALUE, + TimeValue.MAX_VALUE, + new DatabaseConfiguration("test", databaseType, new DatabaseConfiguration.Maxmind("test_account")) + ) + ) + .actionGet(); + ensureGreen(GeoIpDownloader.DATABASES_INDEX); + assertBusy(() -> { + SearchResponse searchResponse = client().search(new SearchRequest(GeoIpDownloader.DATABASES_INDEX)).actionGet(); + try { + assertThat(searchResponse.getHits().getHits().length, equalTo(1)); + } finally { + searchResponse.decRef(); + } + }); + } + + private void createGeoIpPipeline(String pipelineName, String databaseType, String sourceField, String targetField) throws IOException { + final BytesReference bytes; + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.field("description", "test"); + builder.startArray("processors"); + { + builder.startObject(); + { + builder.startObject("geoip"); + { + builder.field("field", sourceField); + builder.field("target_field", targetField); + builder.field("database_file", databaseType + ".mmdb"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + bytes = BytesReference.bytes(builder); + } + assertAcked(clusterAdmin().putPipeline(new PutPipelineRequest(pipelineName, bytes, XContentType.JSON)).actionGet()); + } + + private String ingestDocument(String indexName, String pipelineName, String sourceField) { + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add( + new IndexRequest(indexName).source("{\"" + sourceField + "\": \"89.160.20.128\"}", XContentType.JSON).setPipeline(pipelineName) + ); + BulkResponse response = client().bulk(bulkRequest).actionGet(); + BulkItemResponse[] bulkItemResponses = response.getItems(); + 
assertThat(bulkItemResponses.length, equalTo(1)); + assertThat(bulkItemResponses[0].status(), equalTo(RestStatus.CREATED)); + return bulkItemResponses[0].getId(); + } +} diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index 9eab00fbadf20..f7ab384c69bf1 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -152,9 +152,9 @@ public void testInvalidTimestamp() throws Exception { updateClusterSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true)); assertBusy(() -> { GeoIpTaskState state = getGeoIpTaskState(); - assertEquals( - Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"), - state.getDatabases().keySet() + assertThat( + state.getDatabases().keySet(), + containsInAnyOrder("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb") ); }, 2, TimeUnit.MINUTES); @@ -227,9 +227,9 @@ public void testGeoIpDatabasesDownload() throws Exception { updateClusterSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true)); assertBusy(() -> { GeoIpTaskState state = getGeoIpTaskState(); - assertEquals( - Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"), - state.getDatabases().keySet() + assertThat( + state.getDatabases().keySet(), + containsInAnyOrder("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb") ); putGeoIpPipeline(); // This is to work around the race condition described in #92888 }, 2, TimeUnit.MINUTES); @@ -238,9 +238,9 @@ public void testGeoIpDatabasesDownload() throws Exception { assertBusy(() -> { try { GeoIpTaskState state = (GeoIpTaskState) getTask().getState(); - assertEquals( - Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"), - state.getDatabases().keySet() + assertThat( + state.getDatabases().keySet(), + containsInAnyOrder("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb") ); GeoIpTaskState.Metadata metadata = state.getDatabases().get(id); int size = metadata.lastChunk() - metadata.firstChunk() + 1; @@ -301,9 +301,9 @@ public void testGeoIpDatabasesDownloadNoGeoipProcessors() throws Exception { assertNotNull(getTask().getState()); // removing all geoip processors should not result in the task being stopped assertBusy(() -> { GeoIpTaskState state = getGeoIpTaskState(); - assertEquals( - Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"), - state.getDatabases().keySet() + assertThat( + state.getDatabases().keySet(), + containsInAnyOrder("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb") ); }); } @@ -337,9 +337,9 @@ public void testDoNotDownloadDatabaseOnPipelineCreation() throws Exception { assertAcked(indicesAdmin().prepareUpdateSettings(indexIdentifier).setSettings(indexSettings).get()); assertBusy(() -> { GeoIpTaskState state = getGeoIpTaskState(); - assertEquals( - Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"), - 
state.getDatabases().keySet() + assertThat( + state.getDatabases().keySet(), + containsInAnyOrder("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb") ); }, 2, TimeUnit.MINUTES); diff --git a/modules/ingest-geoip/src/main/java/module-info.java b/modules/ingest-geoip/src/main/java/module-info.java index fa0b0266414f0..4d0acefcb6c9f 100644 --- a/modules/ingest-geoip/src/main/java/module-info.java +++ b/modules/ingest-geoip/src/main/java/module-info.java @@ -15,5 +15,6 @@ requires com.maxmind.geoip2; requires com.maxmind.db; + exports org.elasticsearch.ingest.geoip.direct to org.elasticsearch.server; exports org.elasticsearch.ingest.geoip.stats to org.elasticsearch.server; } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java index efae8fa0c50ca..dcb882ede230c 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java @@ -24,6 +24,7 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Tuple; import org.elasticsearch.env.Environment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; @@ -52,7 +53,6 @@ import java.util.Collection; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -64,6 +64,7 @@ import java.util.zip.GZIPInputStream; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.ingest.geoip.EnterpriseGeoIpTaskState.getEnterpriseGeoIpTaskState; import static org.elasticsearch.ingest.geoip.GeoIpTaskState.getGeoIpTaskState; /** @@ -183,13 +184,14 @@ public Boolean isValid(String databaseFile) { if (state == null) { return true; } + GeoIpTaskState.Metadata metadata = state.getDatabases().get(databaseFile); // we never remove metadata from cluster state, if metadata is null we deal with built-in database, which is always valid if (metadata == null) { return true; } - boolean valid = metadata.isValid(currentState.metadata().settings()); + boolean valid = metadata.isNewEnough(currentState.metadata().settings()); if (valid && metadata.isCloseToExpiration()) { HeaderWarning.addWarning( "database [{}] was not updated for over 25 days, geoip processor will stop working if there is no update for 30 days", @@ -269,20 +271,52 @@ void checkDatabases(ClusterState state) { } } - GeoIpTaskState taskState = getGeoIpTaskState(state); - if (taskState == null) { - // Note: an empty state will purge stale entries in databases map - taskState = GeoIpTaskState.EMPTY; + // we'll consult each of the geoip downloaders to build up a list of database metadatas to work with + List> validMetadatas = new ArrayList<>(); + + // process the geoip task state for the (ordinary) geoip downloader + { + GeoIpTaskState taskState = getGeoIpTaskState(state); + if (taskState == null) { + // Note: an empty state will purge stale entries in databases map + taskState = GeoIpTaskState.EMPTY; + } + validMetadatas.addAll( + taskState.getDatabases() + .entrySet() + .stream() + .filter(e -> e.getValue().isNewEnough(state.getMetadata().settings())) + .map(entry -> Tuple.tuple(entry.getKey(), entry.getValue())) + 
.toList() + ); + } + + // process the geoip task state for the enterprise geoip downloader + { + EnterpriseGeoIpTaskState taskState = getEnterpriseGeoIpTaskState(state); + if (taskState == null) { + // Note: an empty state will purge stale entries in databases map + taskState = EnterpriseGeoIpTaskState.EMPTY; + } + validMetadatas.addAll( + taskState.getDatabases() + .entrySet() + .stream() + .filter(e -> e.getValue().isNewEnough(state.getMetadata().settings())) + .map(entry -> Tuple.tuple(entry.getKey(), entry.getValue())) + .toList() + ); } - taskState.getDatabases().entrySet().stream().filter(e -> e.getValue().isValid(state.getMetadata().settings())).forEach(e -> { - String name = e.getKey(); - GeoIpTaskState.Metadata metadata = e.getValue(); + // run through all the valid metadatas, regardless of source, and retrieve them + validMetadatas.forEach(e -> { + String name = e.v1(); + GeoIpTaskState.Metadata metadata = e.v2(); DatabaseReaderLazyLoader reference = databases.get(name); String remoteMd5 = metadata.md5(); String localMd5 = reference != null ? reference.getMd5() : null; if (Objects.equals(localMd5, remoteMd5)) { - logger.debug("Current reference of [{}] is up to date [{}] with was recorded in CS [{}]", name, localMd5, remoteMd5); + logger.debug("[{}] is up to date [{}] with cluster state [{}]", name, localMd5, remoteMd5); return; } @@ -293,15 +327,14 @@ void checkDatabases(ClusterState state) { } }); + // TODO perhaps we need to handle the license flap persistent task state better than we do + // i think the ideal end state is that we *do not* drop the files that the enterprise downloader + // handled if they fall out -- which means we need to track that in the databases map itself + + // start with the list of all databases we currently know about in this service, + // then drop the ones that didn't check out as valid from the task states List staleEntries = new ArrayList<>(databases.keySet()); - staleEntries.removeAll( - taskState.getDatabases() - .entrySet() - .stream() - .filter(e -> e.getValue().isValid(state.getMetadata().settings())) - .map(Map.Entry::getKey) - .collect(Collectors.toSet()) - ); + staleEntries.removeAll(validMetadatas.stream().map(Tuple::v1).collect(Collectors.toSet())); removeStaleEntries(staleEntries); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java new file mode 100644 index 0000000000000..9645e34751642 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java @@ -0,0 +1,474 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.ingest.geoip.GeoIpTaskState.Metadata; +import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration; +import org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata; +import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.io.InputStream; +import java.net.PasswordAuthentication; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +import static org.elasticsearch.ingest.geoip.EnterpriseGeoIpDownloaderTaskExecutor.MAXMIND_SETTINGS_PREFIX; + +/** + * Main component responsible for downloading new GeoIP databases. + * New databases are downloaded in chunks and stored in .geoip_databases index + * Downloads are verified against MD5 checksum provided by the server + * Current state of all stored databases is stored in cluster state in persistent task state + */ +public class EnterpriseGeoIpDownloader extends AllocatedPersistentTask { + + private static final Logger logger = LogManager.getLogger(EnterpriseGeoIpDownloader.class); + private static final Pattern CHECKSUM_PATTERN = Pattern.compile("(\\w{64})\\s\\s(.*)"); + + // for overriding in tests + static String DEFAULT_MAXMIND_ENDPOINT = System.getProperty( + MAXMIND_SETTINGS_PREFIX + "endpoint.default", + "https://download.maxmind.com/geoip/databases" + ); + // n.b. 
a future enhancement might be to allow for a MAXMIND_ENDPOINT_SETTING, but + // at the moment this is an unsupported system property for use in tests (only) + + static String downloadUrl(final String name, final String suffix) { + String endpointPattern = DEFAULT_MAXMIND_ENDPOINT; + if (endpointPattern.contains("%")) { + throw new IllegalArgumentException("Invalid endpoint [" + endpointPattern + "]"); + } + if (endpointPattern.endsWith("/") == false) { + endpointPattern += "/"; + } + endpointPattern += "%s/download?suffix=%s"; + + // at this point the pattern looks like this (in the default case): + // https://download.maxmind.com/geoip/databases/%s/download?suffix=%s + + return Strings.format(endpointPattern, name, suffix); + } + + static final String DATABASES_INDEX = ".geoip_databases"; + static final int MAX_CHUNK_SIZE = 1024 * 1024; + + private final Client client; + private final HttpClient httpClient; + private final ClusterService clusterService; + private final ThreadPool threadPool; + + // visible for testing + protected volatile EnterpriseGeoIpTaskState state; + private volatile Scheduler.ScheduledCancellable scheduled; + private final Supplier pollIntervalSupplier; + private final Function credentialsBuilder; + + EnterpriseGeoIpDownloader( + Client client, + HttpClient httpClient, + ClusterService clusterService, + ThreadPool threadPool, + long id, + String type, + String action, + String description, + TaskId parentTask, + Map headers, + Supplier pollIntervalSupplier, + Function credentialsBuilder + ) { + super(id, type, action, description, parentTask, headers); + this.client = client; + this.httpClient = httpClient; + this.clusterService = clusterService; + this.threadPool = threadPool; + this.pollIntervalSupplier = pollIntervalSupplier; + this.credentialsBuilder = credentialsBuilder; + } + + void setState(EnterpriseGeoIpTaskState state) { + // this is for injecting the state in GeoIpDownloaderTaskExecutor#nodeOperation just after the task instance has been created + // by the PersistentTasksNodeService -- since the GeoIpDownloader is newly created, the state will be null, and the passed-in + // state cannot be null + assert this.state == null + : "setState() cannot be called when state is already non-null. This most likely happened because setState() was called twice"; + assert state != null : "Should never call setState with a null state. 
Pass an EnterpriseGeoIpTaskState.EMPTY instead."; + this.state = state; + } + + // visible for testing + void updateDatabases() throws IOException { + var clusterState = clusterService.state(); + var geoipIndex = clusterState.getMetadata().getIndicesLookup().get(EnterpriseGeoIpDownloader.DATABASES_INDEX); + if (geoipIndex != null) { + logger.trace("the geoip index [{}] exists", EnterpriseGeoIpDownloader.DATABASES_INDEX); + if (clusterState.getRoutingTable().index(geoipIndex.getWriteIndex()).allPrimaryShardsActive() == false) { + logger.debug("not updating databases because not all primary shards of [{}] index are active yet", DATABASES_INDEX); + return; + } + var blockException = clusterState.blocks().indexBlockedException(ClusterBlockLevel.WRITE, geoipIndex.getWriteIndex().getName()); + if (blockException != null) { + throw blockException; + } + } + + logger.trace("Updating geoip databases"); + IngestGeoIpMetadata geoIpMeta = clusterState.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); + + // if there are entries in the cs that aren't in the persistent task state, + // then download those (only) + // --- + // if there are in the persistent task state, that aren't in the cluster state + // then nuke those (only) + // --- + // else, just download everything + boolean addedSomething = false; + { + Set existingDatabaseNames = state.getDatabases().keySet(); + for (Map.Entry entry : geoIpMeta.getDatabases().entrySet()) { + final String id = entry.getKey(); + DatabaseConfiguration database = entry.getValue().database(); + if (existingDatabaseNames.contains(database.name() + ".mmdb") == false) { + logger.debug("A new database appeared [{}]", database.name()); + + final String accountId = database.maxmind().accountId(); + try (HttpClient.PasswordAuthenticationHolder holder = credentialsBuilder.apply(accountId)) { + if (holder == null) { + logger.warn("No credentials found to download database [{}], skipping download...", id); + } else { + processDatabase(holder.get(), database); + addedSomething = true; + } + } + } + } + } + + boolean droppedSomething = false; + { + // rip anything out of the task state that doesn't match what's in the cluster state, + // that is, if there's no longer an entry for a database in the repository, + // then drop it from the task state, too + Set databases = geoIpMeta.getDatabases() + .values() + .stream() + .map(c -> c.database().name() + ".mmdb") + .collect(Collectors.toSet()); + EnterpriseGeoIpTaskState _state = state; + Collection> metas = _state.getDatabases() + .entrySet() + .stream() + .map(entry -> Tuple.tuple(entry.getKey(), entry.getValue())) + .toList(); + for (Tuple metaTuple : metas) { + String name = metaTuple.v1(); + Metadata meta = metaTuple.v2(); + if (databases.contains(name) == false) { + logger.debug("Dropping [{}], databases was {}", name, databases); + _state = _state.remove(name); + deleteOldChunks(name, meta.lastChunk() + 1); + droppedSomething = true; + } + } + if (droppedSomething) { + state = _state; + updateTaskState(); + } + } + + if (addedSomething == false && droppedSomething == false) { + RuntimeException accumulator = null; + for (Map.Entry entry : geoIpMeta.getDatabases().entrySet()) { + final String id = entry.getKey(); + DatabaseConfiguration database = entry.getValue().database(); + + final String accountId = database.maxmind().accountId(); + try (HttpClient.PasswordAuthenticationHolder holder = credentialsBuilder.apply(accountId)) { + if (holder == null) { + logger.warn("No credentials found to download 
database [{}], skipping download...", id); + } else { + processDatabase(holder.get(), database); + } + } catch (Exception e) { + accumulator = ExceptionsHelper.useOrSuppress(accumulator, ExceptionsHelper.convertToRuntime(e)); + } + } + if (accumulator != null) { + throw accumulator; + } + } + } + + /** + * This method fetches the sha256 file and tar.gz file for the given database from the Maxmind endpoint, then indexes that tar.gz + * file into the .geoip_databases Elasticsearch index, deleting any old versions of the database tar.gz from the index if they exist. + * If the computed sha256 does not match the expected sha256, an error will be logged and the database will not be put into the + * Elasticsearch index. + *

+ * As an implementation detail, this method retrieves the sha256 checksum of the database to download and then invokes + * {@link EnterpriseGeoIpDownloader#processDatabase(PasswordAuthentication, String, String, String)} with that checksum, deferring to + * that method to actually download and process the tar.gz itself. + * + * @param auth The credentials to use to download from the Maxmind endpoint + * @param database The database to be downloaded from Maxmind and indexed into an Elasticsearch index + * @throws IOException If there is an error fetching the sha256 file + */ + void processDatabase(PasswordAuthentication auth, DatabaseConfiguration database) throws IOException { + final String name = database.name(); + logger.debug("Processing database [{}] for configuration [{}]", name, database.id()); + + final String sha256Url = downloadUrl(name, "tar.gz.sha256"); + final String tgzUrl = downloadUrl(name, "tar.gz"); + + String result = new String(httpClient.getBytes(auth, sha256Url), StandardCharsets.UTF_8).trim(); // this throws if the auth is bad + var matcher = CHECKSUM_PATTERN.matcher(result); + boolean match = matcher.matches(); + if (match == false) { + throw new RuntimeException("Unexpected sha256 response from [" + sha256Url + "]"); + } + final String sha256 = matcher.group(1); + // the name that comes from the enterprise downloader cluster state doesn't include the .mmdb extension, + // but the downloading and indexing of database code expects it to be there, so we add it on here before further processing + processDatabase(auth, name + ".mmdb", sha256, tgzUrl); + } + + /** + * This method fetches the tar.gz file for the given database from the Maxmind endpoint, then indexes that tar.gz + * file into the .geoip_databases Elasticsearch index, deleting any old versions of the database tar.gz from the index if they exist. 
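For illustration, here is a minimal, self-contained sketch of the checksum handshake described above: the downloader fetches the tar.gz.sha256 resource and parses it with the same shape of regex as CHECKSUM_PATTERN before downloading the tar.gz itself. The database name and checksum line below are made-up sample values, not output from a real Maxmind response.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

class ChecksumParseSketch {
    // same shape as EnterpriseGeoIpDownloader.CHECKSUM_PATTERN: 64 word characters, two spaces, then the file name
    private static final Pattern CHECKSUM_PATTERN = Pattern.compile("(\\w{64})\\s\\s(.*)");

    public static void main(String[] args) {
        // hypothetical body of https://download.maxmind.com/geoip/databases/GeoIP2-City/download?suffix=tar.gz.sha256
        String line = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef  GeoIP2-City_20240704.tar.gz";
        Matcher matcher = CHECKSUM_PATTERN.matcher(line.trim());
        if (matcher.matches() == false) {
            throw new RuntimeException("Unexpected sha256 response");
        }
        String sha256 = matcher.group(1); // handed on to the tar.gz download and verification step
        String fileName = matcher.group(2);
        System.out.println(sha256 + " for " + fileName);
    }
}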
+ * + * @param auth The credentials to use to download from the Maxmind endpoint + * The name of the database to be downloaded from Maxmind and indexed into an Elasticsearch index + * @param sha256 The sha256 to compare to the computed sha256 of the downloaded tar.gz file + * @param url The URL for the Maxmind endpoint from which the database's tar.gz will be downloaded + */ + private void processDatabase(PasswordAuthentication auth, String name, String sha256, String url) { + Metadata metadata = state.getDatabases().getOrDefault(name, Metadata.EMPTY); + if (Objects.equals(metadata.sha256(), sha256)) { + updateTimestamp(name, metadata); + return; + } + logger.debug("downloading geoip database [{}]", name); + long start = System.currentTimeMillis(); + try (InputStream is = httpClient.get(auth, url)) { + int firstChunk = metadata.lastChunk() + 1; // if there is no metadata, then Metadata.EMPTY + 1 = 0 + Tuple tuple = indexChunks(name, is, firstChunk, MessageDigests.sha256(), sha256, start); + int lastChunk = tuple.v1(); + String md5 = tuple.v2(); + if (lastChunk > firstChunk) { + state = state.put(name, new Metadata(start, firstChunk, lastChunk - 1, md5, start, sha256)); + updateTaskState(); + logger.info("successfully downloaded geoip database [{}]", name); + deleteOldChunks(name, firstChunk); + } + } catch (Exception e) { + logger.error(() -> "error downloading geoip database [" + name + "]", e); + } + } + + // visible for testing + void deleteOldChunks(String name, int firstChunk) { + BoolQueryBuilder queryBuilder = new BoolQueryBuilder().filter(new MatchQueryBuilder("name", name)) + .filter(new RangeQueryBuilder("chunk").to(firstChunk, false)); + DeleteByQueryRequest request = new DeleteByQueryRequest(); + request.indices(DATABASES_INDEX); + request.setQuery(queryBuilder); + client.execute( + DeleteByQueryAction.INSTANCE, + request, + ActionListener.wrap(r -> {}, e -> logger.warn("could not delete old chunks for geoip database [" + name + "]", e)) + ); + } + + // visible for testing + protected void updateTimestamp(String name, Metadata old) { + logger.debug("geoip database [{}] is up to date, updated timestamp", name); + state = state.put( + name, + new Metadata(old.lastUpdate(), old.firstChunk(), old.lastChunk(), old.md5(), System.currentTimeMillis(), old.sha256()) + ); + updateTaskState(); + } + + void updateTaskState() { + PlainActionFuture> future = new PlainActionFuture<>(); + updatePersistentTaskState(state, future); + state = ((EnterpriseGeoIpTaskState) future.actionGet().getState()); + } + + // visible for testing + Tuple indexChunks( + String name, + InputStream is, + int chunk, + @Nullable MessageDigest digest, + String expectedChecksum, + long timestamp + ) throws IOException { + MessageDigest md5 = MessageDigests.md5(); + for (byte[] buf = getChunk(is); buf.length != 0; buf = getChunk(is)) { + md5.update(buf); + if (digest != null) { + digest.update(buf); + } + IndexRequest indexRequest = new IndexRequest(DATABASES_INDEX).id(name + "_" + chunk + "_" + timestamp) + .create(true) + .source(XContentType.SMILE, "name", name, "chunk", chunk, "data", buf); + client.index(indexRequest).actionGet(); + chunk++; + } + + // May take some time before automatic flush kicks in: + // (otherwise the translog will contain large documents for some time without good reason) + FlushRequest flushRequest = new FlushRequest(DATABASES_INDEX); + client.admin().indices().flush(flushRequest).actionGet(); + // Ensure that the chunk documents are visible: + RefreshRequest refreshRequest = new 
RefreshRequest(DATABASES_INDEX); + client.admin().indices().refresh(refreshRequest).actionGet(); + + String actualMd5 = MessageDigests.toHexString(md5.digest()); + String actualChecksum = digest == null ? actualMd5 : MessageDigests.toHexString(digest.digest()); + if (Objects.equals(expectedChecksum, actualChecksum) == false) { + throw new IOException("checksum mismatch, expected [" + expectedChecksum + "], actual [" + actualChecksum + "]"); + } + return Tuple.tuple(chunk, actualMd5); + } + + // visible for testing + static byte[] getChunk(InputStream is) throws IOException { + byte[] buf = new byte[MAX_CHUNK_SIZE]; + int chunkSize = 0; + while (chunkSize < MAX_CHUNK_SIZE) { + int read = is.read(buf, chunkSize, MAX_CHUNK_SIZE - chunkSize); + if (read == -1) { + break; + } + chunkSize += read; + } + if (chunkSize < MAX_CHUNK_SIZE) { + buf = Arrays.copyOf(buf, chunkSize); + } + return buf; + } + + /** + * Downloads the geoip databases now, and schedules them to be downloaded again after pollInterval. + */ + synchronized void runDownloader() { + // by the time we reach here, the state will never be null + assert this.state != null : "this.setState() is null. You need to call setState() before calling runDownloader()"; + + // there's a race condition between here and requestReschedule. originally this scheduleNextRun call was at the end of this + // block, but remember that updateDatabases can take seconds to run (it's downloading bytes from the internet), and so during the + // very first run there would be no future run scheduled to reschedule in requestReschedule. which meant that if you went from zero + // to N(>=2) databases in quick succession, then all but the first database wouldn't necessarily get downloaded, because the + // requestReschedule call in the EnterpriseGeoIpDownloaderTaskExecutor's clusterChanged wouldn't have a scheduled future run to + // reschedule. scheduling the next run at the beginning of this run means that there's a much smaller window (milliseconds?, rather + // than seconds) in which such a race could occur. technically there's a window here, still, but i think it's _greatly_ reduced. + scheduleNextRun(pollIntervalSupplier.get()); + // TODO regardless of the above comment, i like the idea of checking the lowest last-checked time and then running the math to get + // to the next interval from then -- maybe that's a neat future enhancement to add + + if (isCancelled() || isCompleted()) { + return; + } + try { + updateDatabases(); // n.b. this downloads bytes from the internet, it can take a while + } catch (Exception e) { + logger.error("exception during geoip databases update", e); + } + try { + cleanDatabases(); + } catch (Exception e) { + logger.error("exception during geoip databases cleanup", e); + } + } + + /** + * This method requests that the downloader be rescheduled to run immediately (presumably because a dynamic property supplied by + * pollIntervalSupplier or eagerDownloadSupplier has changed, or a pipeline with a geoip processor has been added). This method does + * nothing if this task is cancelled, completed, or has not yet been scheduled to run for the first time. It cancels any existing + * scheduled run. 
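A rough, self-contained illustration of the chunk-and-digest loop that indexChunks and getChunk implement above, with the Elasticsearch indexing omitted; the input stream below is only a stand-in for the downloaded tar.gz bytes.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HexFormat;

class ChunkedDigestSketch {
    private static final int MAX_CHUNK_SIZE = 1024 * 1024; // 1 MB chunks, matching the downloader

    public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
        InputStream is = new ByteArrayInputStream(new byte[3 * MAX_CHUNK_SIZE + 17]); // stand-in for the tar.gz stream
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        byte[] buf = new byte[MAX_CHUNK_SIZE];
        int chunks = 0;
        int read;
        while ((read = is.readNBytes(buf, 0, MAX_CHUNK_SIZE)) > 0) {
            md5.update(buf, 0, read);
            // the real indexChunks indexes each chunk as its own document (name, chunk number, data) at this point
            chunks++;
        }
        System.out.println(chunks + " chunks, md5 " + HexFormat.of().formatHex(md5.digest()));
    }
}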
+ */ + public void requestReschedule() { + if (isCancelled() || isCompleted()) { + return; + } + if (scheduled != null && scheduled.cancel()) { + scheduleNextRun(TimeValue.ZERO); + } + } + + private void cleanDatabases() { + List> expiredDatabases = state.getDatabases() + .entrySet() + .stream() + .filter(e -> e.getValue().isNewEnough(clusterService.state().metadata().settings()) == false) + .map(entry -> Tuple.tuple(entry.getKey(), entry.getValue())) + .toList(); + expiredDatabases.forEach(e -> { + String name = e.v1(); + Metadata meta = e.v2(); + deleteOldChunks(name, meta.lastChunk() + 1); + state = state.put(name, new Metadata(meta.lastUpdate(), meta.firstChunk(), meta.lastChunk(), meta.md5(), meta.lastCheck() - 1)); + updateTaskState(); + }); + } + + @Override + protected void onCancelled() { + if (scheduled != null) { + scheduled.cancel(); + } + markAsCompleted(); + } + + private void scheduleNextRun(TimeValue time) { + if (threadPool.scheduler().isShutdown() == false) { + scheduled = threadPool.schedule(this::runDownloader, time, threadPool.generic()); + } + } + +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTaskExecutor.java new file mode 100644 index 0000000000000..8fc46fe157548 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTaskExecutor.java @@ -0,0 +1,257 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.OriginSettingClient; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.ingest.EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTaskState; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.io.InputStream; +import java.security.GeneralSecurityException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.ingest.EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER; +import static org.elasticsearch.ingest.geoip.GeoIpDownloaderTaskExecutor.ENABLED_SETTING; +import static org.elasticsearch.ingest.geoip.GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING; + +public class EnterpriseGeoIpDownloaderTaskExecutor extends PersistentTasksExecutor + implements + ClusterStateListener { + private static final Logger logger = LogManager.getLogger(EnterpriseGeoIpDownloader.class); + + static final String MAXMIND_SETTINGS_PREFIX = "ingest.geoip.downloader.maxmind."; + + public static final Setting MAXMIND_LICENSE_KEY_SETTING = SecureSetting.secureString( + MAXMIND_SETTINGS_PREFIX + "license_key", + null + ); + + private final Client client; + private final HttpClient httpClient; + private final ClusterService clusterService; + private final ThreadPool threadPool; + private final Settings settings; + private volatile TimeValue pollInterval; + private final AtomicReference currentTask = new AtomicReference<>(); + + private volatile SecureSettings cachedSecureSettings; + + EnterpriseGeoIpDownloaderTaskExecutor(Client client, HttpClient httpClient, ClusterService clusterService, ThreadPool threadPool) { + super(ENTERPRISE_GEOIP_DOWNLOADER, threadPool.generic()); + this.client = new OriginSettingClient(client, IngestService.INGEST_ORIGIN); + this.httpClient = httpClient; + this.clusterService = clusterService; + this.threadPool = threadPool; + this.settings = clusterService.getSettings(); + this.pollInterval = POLL_INTERVAL_SETTING.get(settings); + + // do an initial load using the node settings + reload(clusterService.getSettings()); + } + + /** + * This method completes the initialization of the EnterpriseGeoIpDownloaderTaskExecutor by registering several listeners. 
+ */ + public void init() { + clusterService.addListener(this); + clusterService.getClusterSettings().addSettingsUpdateConsumer(POLL_INTERVAL_SETTING, this::setPollInterval); + } + + private void setPollInterval(TimeValue pollInterval) { + if (Objects.equals(this.pollInterval, pollInterval) == false) { + this.pollInterval = pollInterval; + EnterpriseGeoIpDownloader currentDownloader = getCurrentTask(); + if (currentDownloader != null) { + currentDownloader.requestReschedule(); + } + } + } + + private HttpClient.PasswordAuthenticationHolder buildCredentials(final String username) { + final char[] passwordChars; + if (cachedSecureSettings.getSettingNames().contains(MAXMIND_LICENSE_KEY_SETTING.getKey())) { + passwordChars = cachedSecureSettings.getString(MAXMIND_LICENSE_KEY_SETTING.getKey()).getChars(); + } else { + passwordChars = null; + } + + // if the username is missing, empty, or blank, return null as 'no auth' + if (username == null || username.isEmpty() || username.isBlank()) { + return null; + } + + // likewise if the password chars array is missing or empty, return null as 'no auth' + if (passwordChars == null || passwordChars.length == 0) { + return null; + } + + return new HttpClient.PasswordAuthenticationHolder(username, passwordChars); + } + + @Override + protected EnterpriseGeoIpDownloader createTask( + long id, + String type, + String action, + TaskId parentTaskId, + PersistentTasksCustomMetadata.PersistentTask taskInProgress, + Map headers + ) { + return new EnterpriseGeoIpDownloader( + client, + httpClient, + clusterService, + threadPool, + id, + type, + action, + getDescription(taskInProgress), + parentTaskId, + headers, + () -> pollInterval, + this::buildCredentials + ); + } + + @Override + protected void nodeOperation(AllocatedPersistentTask task, EnterpriseGeoIpTaskParams params, PersistentTaskState state) { + EnterpriseGeoIpDownloader downloader = (EnterpriseGeoIpDownloader) task; + EnterpriseGeoIpTaskState geoIpTaskState = (state == null) ? EnterpriseGeoIpTaskState.EMPTY : (EnterpriseGeoIpTaskState) state; + downloader.setState(geoIpTaskState); + currentTask.set(downloader); + if (ENABLED_SETTING.get(clusterService.state().metadata().settings(), settings)) { + downloader.runDownloader(); + } + } + + public EnterpriseGeoIpDownloader getCurrentTask() { + return currentTask.get(); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + EnterpriseGeoIpDownloader currentDownloader = getCurrentTask(); + if (currentDownloader != null) { + boolean hasGeoIpMetadataChanges = event.metadataChanged() + && event.changedCustomMetadataSet().contains(IngestGeoIpMetadata.TYPE); + if (hasGeoIpMetadataChanges) { + currentDownloader.requestReschedule(); // watching the cluster changed events to kick the thing off if it's not running + } + } + } + + public synchronized void reload(Settings settings) { + // `SecureSettings` are available here! cache them as they will be needed + // whenever dynamic cluster settings change and we have to rebuild the accounts + try { + this.cachedSecureSettings = extractSecureSettings(settings, List.of(MAXMIND_LICENSE_KEY_SETTING)); + } catch (GeneralSecurityException e) { + // rethrow as a runtime exception, there's logging higher up the call chain around ReloadablePlugin + throw new ElasticsearchException("Exception while reloading enterprise geoip download task executor", e); + } + } + + /** + * Extracts the {@link SecureSettings}` out of the passed in {@link Settings} object. 
The {@code Setting} argument has to have the + * {@code SecureSettings} open/available. Normally {@code SecureSettings} are available only under specific callstacks (eg. during node + * initialization or during a `reload` call). The returned copy can be reused freely as it will never be closed (this is a bit of + * cheating, but it is necessary in this specific circumstance). Only works for secure settings of type string (not file). + * + * @param source A {@code Settings} object with its {@code SecureSettings} open/available. + * @param securePluginSettings The list of settings to copy. + * @return A copy of the {@code SecureSettings} of the passed in {@code Settings} argument. + */ + private static SecureSettings extractSecureSettings(Settings source, List> securePluginSettings) + throws GeneralSecurityException { + // get the secure settings out + final SecureSettings sourceSecureSettings = Settings.builder().put(source, true).getSecureSettings(); + // filter and cache them... + final Map innerMap = new HashMap<>(); + if (sourceSecureSettings != null && securePluginSettings != null) { + for (final String settingKey : sourceSecureSettings.getSettingNames()) { + for (final Setting secureSetting : securePluginSettings) { + if (secureSetting.match(settingKey)) { + innerMap.put( + settingKey, + new SecureSettingValue( + sourceSecureSettings.getString(settingKey), + sourceSecureSettings.getSHA256Digest(settingKey) + ) + ); + } + } + } + } + return new SecureSettings() { + @Override + public boolean isLoaded() { + return true; + } + + @Override + public SecureString getString(String setting) { + return innerMap.get(setting).value(); + } + + @Override + public Set getSettingNames() { + return innerMap.keySet(); + } + + @Override + public InputStream getFile(String setting) { + throw new UnsupportedOperationException("A cached SecureSetting cannot be a file"); + } + + @Override + public byte[] getSHA256Digest(String setting) { + return innerMap.get(setting).sha256Digest(); + } + + @Override + public void close() throws IOException {} + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException("A cached SecureSetting cannot be serialized"); + } + }; + } + + /** + * A single-purpose record for the internal implementation of extractSecureSettings + */ + private record SecureSettingValue(SecureString value, byte[] sha256Digest) {} +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java new file mode 100644 index 0000000000000..1dd6422fd388a --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.ingest.EnterpriseGeoIpTask; +import org.elasticsearch.ingest.geoip.GeoIpTaskState.Metadata; +import org.elasticsearch.persistent.PersistentTaskState; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.ingest.geoip.GeoIpDownloader.GEOIP_DOWNLOADER; +import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.getTaskWithId; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +class EnterpriseGeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable { + + private static final ParseField DATABASES = new ParseField("databases"); + + static final EnterpriseGeoIpTaskState EMPTY = new EnterpriseGeoIpTaskState(Map.of()); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + GEOIP_DOWNLOADER, + true, + args -> { + List> databases = (List>) args[0]; + return new EnterpriseGeoIpTaskState(databases.stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2))); + } + ); + + static { + PARSER.declareNamedObjects(constructorArg(), (p, c, name) -> Tuple.tuple(name, Metadata.fromXContent(p)), DATABASES); + } + + public static EnterpriseGeoIpTaskState fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + private final Map databases; + + EnterpriseGeoIpTaskState(Map databases) { + this.databases = Map.copyOf(databases); + } + + EnterpriseGeoIpTaskState(StreamInput input) throws IOException { + databases = input.readImmutableMap( + in -> new Metadata(in.readLong(), in.readVInt(), in.readVInt(), in.readString(), in.readLong(), in.readOptionalString()) + ); + } + + public EnterpriseGeoIpTaskState put(String name, Metadata metadata) { + HashMap newDatabases = new HashMap<>(databases); + newDatabases.put(name, metadata); + return new EnterpriseGeoIpTaskState(newDatabases); + } + + public EnterpriseGeoIpTaskState remove(String name) { + HashMap newDatabases = new HashMap<>(databases); + newDatabases.remove(name); + return new EnterpriseGeoIpTaskState(newDatabases); + } + + public Map getDatabases() { + return databases; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + EnterpriseGeoIpTaskState that = (EnterpriseGeoIpTaskState) o; + return databases.equals(that.databases); + } + + @Override + public int hashCode() { + return Objects.hash(databases); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.startObject("databases"); + for (Map.Entry e : 
databases.entrySet()) { + builder.field(e.getKey(), e.getValue()); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return "enterprise-geoip-downloader"; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER_BACKPORT_8_15; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(databases, (o, v) -> { + o.writeLong(v.lastUpdate()); + o.writeVInt(v.firstChunk()); + o.writeVInt(v.lastChunk()); + o.writeString(v.md5()); + o.writeLong(v.lastCheck()); + o.writeOptionalString(v.sha256()); + }); + } + + /** + * Retrieves the geoip downloader's task state from the cluster state. This may return null in some circumstances, + * for example if the geoip downloader task hasn't been created yet (which it wouldn't be if it's disabled). + * + * @param state the cluster state to read the task state from + * @return the geoip downloader's task state or null if there is not a state to read + */ + @Nullable + static EnterpriseGeoIpTaskState getEnterpriseGeoIpTaskState(ClusterState state) { + PersistentTasksCustomMetadata.PersistentTask task = getTaskWithId(state, EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER); + return (task == null) ? null : (EnterpriseGeoIpTaskState) task.getState(); + } + +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 13394a2a0c7cc..ee6f2f16f051b 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; @@ -318,14 +319,15 @@ public void requestReschedule() { } private void cleanDatabases() { - List> expiredDatabases = state.getDatabases() + List> expiredDatabases = state.getDatabases() .entrySet() .stream() - .filter(e -> e.getValue().isValid(clusterService.state().metadata().settings()) == false) + .filter(e -> e.getValue().isNewEnough(clusterService.state().metadata().settings()) == false) + .map(entry -> Tuple.tuple(entry.getKey(), entry.getValue())) .toList(); expiredDatabases.forEach(e -> { - String name = e.getKey(); - Metadata meta = e.getValue(); + String name = e.v1(); + Metadata meta = e.v2(); deleteOldChunks(name, meta.lastChunk() + 1); state = state.put(name, new Metadata(meta.lastUpdate(), meta.firstChunk(), meta.lastChunk(), meta.md5(), meta.lastCheck() - 1)); updateTaskState(); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index 09ac488f96e2d..3f89bb1dd5c50 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -217,7 +217,7 @@ public void clusterChanged(ClusterChangedEvent event) { } boolean hasIndicesChanges 
= event.previousState().metadata().indices().equals(event.state().metadata().indices()) == false; - boolean hasIngestPipelineChanges = event.changedCustomMetadataSet().contains(IngestMetadata.TYPE); + boolean hasIngestPipelineChanges = event.metadataChanged() && event.changedCustomMetadataSet().contains(IngestMetadata.TYPE); if (hasIngestPipelineChanges || hasIndicesChanges) { boolean newAtLeastOneGeoipProcessor = hasAtLeastOneGeoipProcessor(event.state()); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java index a405d90b24dcc..93dc345a80a2f 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java @@ -42,6 +42,10 @@ class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable { + private static boolean includeSha256(TransportVersion version) { + return version.onOrAfter(TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER_BACKPORT_8_15); + } + private static final ParseField DATABASES = new ParseField("databases"); static final GeoIpTaskState EMPTY = new GeoIpTaskState(Map.of()); @@ -71,7 +75,16 @@ public static GeoIpTaskState fromXContent(XContentParser parser) throws IOExcept } GeoIpTaskState(StreamInput input) throws IOException { - databases = input.readImmutableMap(in -> new Metadata(in.readLong(), in.readVInt(), in.readVInt(), in.readString(), in.readLong())); + databases = input.readImmutableMap( + in -> new Metadata( + in.readLong(), + in.readVInt(), + in.readVInt(), + in.readString(), + in.readLong(), + includeSha256(in.getTransportVersion()) ? input.readOptionalString() : null + ) + ); } public GeoIpTaskState put(String name, Metadata metadata) { @@ -129,16 +142,21 @@ public void writeTo(StreamOutput out) throws IOException { o.writeVInt(v.lastChunk); o.writeString(v.md5); o.writeLong(v.lastCheck); + if (includeSha256(o.getTransportVersion())) { + o.writeOptionalString(v.sha256); + } }); } - record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long lastCheck) implements ToXContentObject { + record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long lastCheck, @Nullable String sha256) + implements + ToXContentObject { /** * An empty Metadata object useful for getOrDefault -type calls. Crucially, the 'lastChunk' is -1, so it's safe to use * with logic that says the new firstChunk is the old lastChunk + 1. */ - static Metadata EMPTY = new Metadata(-1, -1, -1, "", -1); + static Metadata EMPTY = new Metadata(-1, -1, -1, "", -1, null); private static final String NAME = GEOIP_DOWNLOADER + "-metadata"; private static final ParseField LAST_CHECK = new ParseField("last_check"); @@ -146,6 +164,7 @@ record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long private static final ParseField FIRST_CHUNK = new ParseField("first_chunk"); private static final ParseField LAST_CHUNK = new ParseField("last_chunk"); private static final ParseField MD5 = new ParseField("md5"); + private static final ParseField SHA256 = new ParseField("sha256"); private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( NAME, @@ -155,7 +174,8 @@ record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long (int) args[1], (int) args[2], (String) args[3], - (long) (args[4] == null ? args[0] : args[4]) + (long) (args[4] == null ? 
args[0] : args[4]), + (String) args[5] ) ); @@ -165,6 +185,7 @@ record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long PARSER.declareInt(constructorArg(), LAST_CHUNK); PARSER.declareString(constructorArg(), MD5); PARSER.declareLong(optionalConstructorArg(), LAST_CHECK); + PARSER.declareString(optionalConstructorArg(), SHA256); } public static Metadata fromXContent(XContentParser parser) { @@ -179,11 +200,15 @@ public static Metadata fromXContent(XContentParser parser) { Objects.requireNonNull(md5); } + Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long lastCheck) { + this(lastUpdate, firstChunk, lastChunk, md5, lastCheck, null); + } + public boolean isCloseToExpiration() { return Instant.ofEpochMilli(lastCheck).isBefore(Instant.now().minus(25, ChronoUnit.DAYS)); } - public boolean isValid(Settings settings) { + public boolean isNewEnough(Settings settings) { TimeValue valid = settings.getAsTime("ingest.geoip.database_validity", TimeValue.timeValueDays(30)); return Instant.ofEpochMilli(lastCheck).isAfter(Instant.now().minus(valid.getMillis(), ChronoUnit.MILLIS)); } @@ -197,6 +222,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(FIRST_CHUNK.getPreferredName(), firstChunk); builder.field(LAST_CHUNK.getPreferredName(), lastChunk); builder.field(MD5.getPreferredName(), md5); + if (sha256 != null) { // only serialize if not null, for prettiness reasons + builder.field(SHA256.getPreferredName(), sha256); + } } builder.endObject(); return builder; diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java index 8efc4dc2e74bd..2f6bd6ef20fd0 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java @@ -24,6 +24,7 @@ import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; +import java.util.Arrays; import java.util.Objects; import static java.net.HttpURLConnection.HTTP_MOVED_PERM; @@ -34,6 +35,31 @@ class HttpClient { + /** + * A PasswordAuthenticationHolder is just a wrapper around a PasswordAuthentication to implement AutoCloseable. + * This construction makes it possible to use a PasswordAuthentication in a try-with-resources statement, which + * makes it easier to ensure cleanup of the PasswordAuthentication is performed after it's finished being used. + */ + static final class PasswordAuthenticationHolder implements AutoCloseable { + private PasswordAuthentication auth; + + PasswordAuthenticationHolder(String username, char[] passwordChars) { + this.auth = new PasswordAuthentication(username, passwordChars); // clones the passed-in chars + } + + public PasswordAuthentication get() { + Objects.requireNonNull(auth); + return auth; + } + + @Override + public void close() { + final PasswordAuthentication clear = this.auth; + this.auth = null; // set to null and then clear it + Arrays.fill(clear.getPassword(), '\0'); // zero out the password chars + } + } + // a private sentinel value for representing the idea that there's no auth for some request. // this allows us to have a not-null requirement on the methods that do accept an auth. // if you don't want auth, then don't use those methods. 
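For illustration, a fragment showing the try-with-resources usage this wrapper enables, mirroring how the enterprise downloader consumes credentials; the account id and key below are placeholders, and the fragment assumes it runs in the same package as HttpClient.

// hypothetical credentials; real values come from the database configuration and the maxmind license key secure setting
try (HttpClient.PasswordAuthenticationHolder holder = new HttpClient.PasswordAuthenticationHolder("123456", "license-key".toCharArray())) {
    PasswordAuthentication auth = holder.get();
    // pass auth to httpClient.getBytes(auth, sha256Url) or httpClient.get(auth, tgzUrl)
} // close() drops the internal reference and zeroes out the password chars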
;) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java new file mode 100644 index 0000000000000..b6bfbf94fa8f7 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java @@ -0,0 +1,157 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Holds the ingest-geoip databases that are available in the cluster state. 
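As a usage note, other components read this custom metadata back out of the cluster state roughly as follows (a fragment, modeled on EnterpriseGeoIpDownloader#updateDatabases; clusterState is assumed to be in scope):

IngestGeoIpMetadata geoIpMeta = clusterState.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY);
for (Map.Entry<String, DatabaseConfigurationMetadata> entry : geoIpMeta.getDatabases().entrySet()) {
    String configurationId = entry.getKey();
    DatabaseConfiguration database = entry.getValue().database();
    // database.name() is the Maxmind database name without the ".mmdb" suffix
}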
+ */ +public final class IngestGeoIpMetadata implements Metadata.Custom { + + public static final String TYPE = "ingest_geoip"; + private static final ParseField DATABASES_FIELD = new ParseField("databases"); + + public static final IngestGeoIpMetadata EMPTY = new IngestGeoIpMetadata(Map.of()); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "ingest_geoip_metadata", + a -> new IngestGeoIpMetadata( + ((List) a[0]).stream().collect(Collectors.toMap((m) -> m.database().id(), Function.identity())) + ) + ); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> DatabaseConfigurationMetadata.parse(p, n), v -> { + throw new IllegalArgumentException("ordered " + DATABASES_FIELD.getPreferredName() + " are not supported"); + }, DATABASES_FIELD); + } + + private final Map databases; + + public IngestGeoIpMetadata(Map databases) { + this.databases = Map.copyOf(databases); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER_BACKPORT_8_15; + } + + public Map getDatabases() { + return databases; + } + + public IngestGeoIpMetadata(StreamInput in) throws IOException { + this.databases = in.readMap(StreamInput::readString, DatabaseConfigurationMetadata::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(databases, StreamOutput::writeWriteable); + } + + public static IngestGeoIpMetadata fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.concat(ChunkedToXContentHelper.xContentValuesMap(DATABASES_FIELD.getPreferredName(), databases)); + } + + @Override + public EnumSet context() { + return Metadata.ALL_CONTEXTS; + } + + @Override + public Diff diff(Metadata.Custom before) { + return new GeoIpMetadataDiff((IngestGeoIpMetadata) before, this); + } + + static class GeoIpMetadataDiff implements NamedDiff { + + final Diff> databases; + + GeoIpMetadataDiff(IngestGeoIpMetadata before, IngestGeoIpMetadata after) { + this.databases = DiffableUtils.diff(before.databases, after.databases, DiffableUtils.getStringKeySerializer()); + } + + GeoIpMetadataDiff(StreamInput in) throws IOException { + databases = DiffableUtils.readJdkMapDiff( + in, + DiffableUtils.getStringKeySerializer(), + DatabaseConfigurationMetadata::new, + DatabaseConfigurationMetadata::readDiffFrom + ); + } + + @Override + public Metadata.Custom apply(Metadata.Custom part) { + return new IngestGeoIpMetadata(databases.apply(((IngestGeoIpMetadata) part).databases)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + databases.writeTo(out); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER_BACKPORT_8_15; + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IngestGeoIpMetadata that = (IngestGeoIpMetadata) o; + return Objects.equals(databases, that.databases); + } + + @Override + public int hashCode() { + return Objects.hash(databases); + } +} diff --git 
a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index 9d0f9848d97b6..e606688ad60a0 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -12,8 +12,10 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -25,8 +27,18 @@ import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.ingest.EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.geoip.direct.DeleteDatabaseConfigurationAction; +import org.elasticsearch.ingest.geoip.direct.GetDatabaseConfigurationAction; +import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction; +import org.elasticsearch.ingest.geoip.direct.RestDeleteDatabaseConfigurationAction; +import org.elasticsearch.ingest.geoip.direct.RestGetDatabaseConfigurationAction; +import org.elasticsearch.ingest.geoip.direct.RestPutDatabaseConfigurationAction; +import org.elasticsearch.ingest.geoip.direct.TransportDeleteDatabaseConfigurationAction; +import org.elasticsearch.ingest.geoip.direct.TransportGetDatabaseConfigurationAction; +import org.elasticsearch.ingest.geoip.direct.TransportPutDatabaseConfigurationAction; import org.elasticsearch.ingest.geoip.stats.GeoIpDownloaderStats; import org.elasticsearch.ingest.geoip.stats.GeoIpStatsAction; import org.elasticsearch.ingest.geoip.stats.GeoIpStatsTransportAction; @@ -38,6 +50,7 @@ import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; @@ -57,13 +70,21 @@ import java.util.function.Supplier; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; +import static org.elasticsearch.ingest.EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER; import static org.elasticsearch.ingest.IngestService.INGEST_ORIGIN; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX_PATTERN; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.GEOIP_DOWNLOADER; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemIndexPlugin, Closeable, PersistentTaskPlugin, ActionPlugin { +public class IngestGeoIpPlugin extends Plugin + implements + IngestPlugin, + SystemIndexPlugin, + Closeable, + PersistentTaskPlugin, + ActionPlugin, + ReloadablePlugin { public 
static final Setting CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope); private static final int GEOIP_INDEX_MAPPINGS_VERSION = 1; /** @@ -78,6 +99,7 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd private final SetOnce ingestService = new SetOnce<>(); private final SetOnce databaseRegistry = new SetOnce<>(); private GeoIpDownloaderTaskExecutor geoIpDownloaderTaskExecutor; + private EnterpriseGeoIpDownloaderTaskExecutor enterpriseGeoIpDownloaderTaskExecutor; @Override public List> getSettings() { @@ -86,7 +108,8 @@ public List> getSettings() { GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING, GeoIpDownloaderTaskExecutor.ENABLED_SETTING, GeoIpDownloader.ENDPOINT_SETTING, - GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING + GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING, + EnterpriseGeoIpDownloaderTaskExecutor.MAXMIND_LICENSE_KEY_SETTING ); } @@ -123,7 +146,16 @@ public Collection createComponents(PluginServices services) { services.threadPool() ); geoIpDownloaderTaskExecutor.init(); - return List.of(databaseRegistry.get(), geoIpDownloaderTaskExecutor); + + enterpriseGeoIpDownloaderTaskExecutor = new EnterpriseGeoIpDownloaderTaskExecutor( + services.client(), + new HttpClient(), + services.clusterService(), + services.threadPool() + ); + enterpriseGeoIpDownloaderTaskExecutor.init(); + + return List.of(databaseRegistry.get(), geoIpDownloaderTaskExecutor, enterpriseGeoIpDownloaderTaskExecutor); } @Override @@ -139,12 +171,17 @@ public List> getPersistentTasksExecutor( SettingsModule settingsModule, IndexNameExpressionResolver expressionResolver ) { - return List.of(geoIpDownloaderTaskExecutor); + return List.of(geoIpDownloaderTaskExecutor, enterpriseGeoIpDownloaderTaskExecutor); } @Override public List> getActions() { - return List.of(new ActionHandler<>(GeoIpStatsAction.INSTANCE, GeoIpStatsTransportAction.class)); + return List.of( + new ActionHandler<>(GeoIpStatsAction.INSTANCE, GeoIpStatsTransportAction.class), + new ActionHandler<>(GetDatabaseConfigurationAction.INSTANCE, TransportGetDatabaseConfigurationAction.class), + new ActionHandler<>(DeleteDatabaseConfigurationAction.INSTANCE, TransportDeleteDatabaseConfigurationAction.class), + new ActionHandler<>(PutDatabaseConfigurationAction.INSTANCE, TransportPutDatabaseConfigurationAction.class) + ); } @Override @@ -159,22 +196,41 @@ public List getRestHandlers( Supplier nodesInCluster, Predicate clusterSupportsFeature ) { - return List.of(new RestGeoIpStatsAction()); + return List.of( + new RestGeoIpStatsAction(), + new RestGetDatabaseConfigurationAction(), + new RestDeleteDatabaseConfigurationAction(), + new RestPutDatabaseConfigurationAction() + ); } @Override public List getNamedXContent() { return List.of( new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(GEOIP_DOWNLOADER), GeoIpTaskParams::fromXContent), - new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(GEOIP_DOWNLOADER), GeoIpTaskState::fromXContent) + new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(GEOIP_DOWNLOADER), GeoIpTaskState::fromXContent), + new NamedXContentRegistry.Entry( + PersistentTaskParams.class, + new ParseField(ENTERPRISE_GEOIP_DOWNLOADER), + EnterpriseGeoIpTaskParams::fromXContent + ), + new NamedXContentRegistry.Entry( + PersistentTaskState.class, + new ParseField(ENTERPRISE_GEOIP_DOWNLOADER), + EnterpriseGeoIpTaskState::fromXContent + ) ); } @Override public List getNamedWriteables() { return List.of( + 
new NamedWriteableRegistry.Entry(Metadata.Custom.class, IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata::new), + new NamedWriteableRegistry.Entry(NamedDiff.class, IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.GeoIpMetadataDiff::new), new NamedWriteableRegistry.Entry(PersistentTaskState.class, GEOIP_DOWNLOADER, GeoIpTaskState::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, GEOIP_DOWNLOADER, GeoIpTaskParams::new), + new NamedWriteableRegistry.Entry(PersistentTaskState.class, ENTERPRISE_GEOIP_DOWNLOADER, EnterpriseGeoIpTaskState::new), + new NamedWriteableRegistry.Entry(PersistentTaskParams.class, ENTERPRISE_GEOIP_DOWNLOADER, EnterpriseGeoIpTaskParams::new), new NamedWriteableRegistry.Entry(Task.Status.class, GEOIP_DOWNLOADER, GeoIpDownloaderStats::new) ); } @@ -235,4 +291,9 @@ private static XContentBuilder mappings() { throw new UncheckedIOException("Failed to build mappings for " + DATABASES_INDEX, e); } } + + @Override + public void reload(Settings settings) { + enterpriseGeoIpDownloaderTaskExecutor.reload(settings); + } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java new file mode 100644 index 0000000000000..0a43d7a2d830b --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java @@ -0,0 +1,209 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Objects; +import java.util.Set; +import java.util.regex.Pattern; + +/** + * A database configuration is an identified (has an id) configuration of a named geoip location database to download, + * and the identifying information/configuration to download the named database from some database provider. + *
+ * That is, it has an id e.g. "my_db_config_1" and it says "download the file named XXXX from SomeCompany, and here's the + * magic token to use to do that." + */ +public record DatabaseConfiguration(String id, String name, Maxmind maxmind) implements Writeable, ToXContentObject { + + // id is a user selected signifier like 'my_domain_db' + // name is the name of a file that can be downloaded (like 'GeoIP2-Domain') + + // a configuration will have a 'type' like "maxmind", and that might have some more details, + // for now, though the important thing is that the json has to have it even though we don't model it meaningfully in this class + + public DatabaseConfiguration { + // these are invariants, not actual validation + Objects.requireNonNull(id); + Objects.requireNonNull(name); + Objects.requireNonNull(maxmind); + } + + /** + * An alphanumeric, followed by 0-126 alphanumerics, dashes, or underscores. That is, 1-127 alphanumerics, dashes, or underscores, + * but a leading dash or underscore isn't allowed (we're reserving leading dashes and underscores [and other odd characters] for + * Elastic and the future). + */ + private static final Pattern ID_PATTERN = Pattern.compile("\\p{Alnum}[_\\-\\p{Alnum}]{0,126}"); + + public static final Set MAXMIND_NAMES = Set.of( + "GeoIP2-Anonymous-IP", + "GeoIP2-City", + "GeoIP2-Connection-Type", + "GeoIP2-Country", + "GeoIP2-Domain", + "GeoIP2-Enterprise", + "GeoIP2-ISP" + + // in order to prevent a conflict between the (ordinary) geoip downloader and the enterprise geoip downloader, + // the enterprise geoip downloader is limited only to downloading the commercial files that the (ordinary) geoip downloader + // doesn't support out of the box -- in the future if we would like to relax this constraint, then we'll need to resolve that + // conflict at the same time. + + // "GeoLite2-ASN", + // "GeoLite2-City", + // "GeoLite2-Country" + ); + + private static final ParseField NAME = new ParseField("name"); + private static final ParseField MAXMIND = new ParseField("maxmind"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "database", + false, + (a, id) -> { + String name = (String) a[0]; + Maxmind maxmind = (Maxmind) a[1]; + return new DatabaseConfiguration(id, name, maxmind); + } + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (parser, id) -> Maxmind.PARSER.apply(parser, null), MAXMIND); + } + + public DatabaseConfiguration(StreamInput in) throws IOException { + this(in.readString(), in.readString(), new Maxmind(in)); + } + + public static DatabaseConfiguration parse(XContentParser parser, String id) { + return PARSER.apply(parser, id); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + out.writeString(name); + maxmind.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("name", name); + builder.field("maxmind", maxmind); + builder.endObject(); + return builder; + } + + /** + * An id is intended to be alphanumerics, dashes, and underscores (only), but we're reserving leading dashes and underscores for + * ourselves in the future, that is, they're not for the ones that users can PUT. 
+ */ + static void validateId(String id) throws IllegalArgumentException { + if (Strings.isNullOrEmpty(id)) { + throw new IllegalArgumentException("invalid database configuration id [" + id + "]: must not be null or empty"); + } + MetadataCreateIndexService.validateIndexOrAliasName( + id, + (id1, description) -> new IllegalArgumentException("invalid database configuration id [" + id1 + "]: " + description) + ); + int byteCount = id.getBytes(StandardCharsets.UTF_8).length; + if (byteCount > 127) { + throw new IllegalArgumentException( + "invalid database configuration id [" + id + "]: id is too long, (" + byteCount + " > " + 127 + ")" + ); + } + if (ID_PATTERN.matcher(id).matches() == false) { + throw new IllegalArgumentException( + "invalid database configuration id [" + + id + + "]: id doesn't match required rules (alphanumerics, dashes, and underscores, only)" + ); + } + } + + public ActionRequestValidationException validate() { + ActionRequestValidationException err = new ActionRequestValidationException(); + + // how do we cross the id validation divide here? or do we? it seems unfortunate to not invoke it at all. + + // name validation + if (Strings.hasText(name) == false) { + err.addValidationError("invalid name [" + name + "]: cannot be empty"); + } + + if (MAXMIND_NAMES.contains(name) == false) { + err.addValidationError("invalid name [" + name + "]: must be a supported name ([" + MAXMIND_NAMES + "])"); + } + + // important: the name must be unique across all configurations of this same type, + // but we validate that in the cluster state update, not here. + try { + validateId(id); + } catch (IllegalArgumentException e) { + err.addValidationError(e.getMessage()); + } + return err.validationErrors().isEmpty() ? null : err; + } + + public record Maxmind(String accountId) implements Writeable, ToXContentObject { + + public Maxmind { + // this is an invariant, not actual validation + Objects.requireNonNull(accountId); + } + + private static final ParseField ACCOUNT_ID = new ParseField("account_id"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("database", false, (a, id) -> { + String accountId = (String) a[0]; + return new Maxmind(accountId); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ACCOUNT_ID); + } + + public Maxmind(StreamInput in) throws IOException { + this(in.readString()); + } + + public static Maxmind parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(accountId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("account_id", accountId); + builder.endObject(); + return builder; + } + } +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadata.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadata.java new file mode 100644 index 0000000000000..574f97e4c5e64 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadata.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
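For orientation, a minimal sketch (not part of the patch) of how the DatabaseConfiguration record defined above is constructed and validated; the id, database name, and MaxMind account id below are made-up example values, and the class name is only a placeholder for the sketch.

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration;

class DatabaseConfigurationSketch {
    static void example() {
        // a user-chosen id (alphanumerics, dashes, underscores), a supported MaxMind database name,
        // and the MaxMind account the enterprise downloader should authenticate as
        DatabaseConfiguration config = new DatabaseConfiguration(
            "my_db_config_1",
            "GeoIP2-City",
            new DatabaseConfiguration.Maxmind("123456")
        );
        // validate() returns null when the configuration is acceptable,
        // otherwise an exception carrying the accumulated validation errors
        ActionRequestValidationException errors = config.validate();
        assert errors == null;
    }
}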
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.SimpleDiffable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +/** + * {@code DatabaseConfigurationMetadata} encapsulates a {@link DatabaseConfiguration} as well as + * the additional meta information like version (a monotonically incrementing number), and last modified date. + */ +public record DatabaseConfigurationMetadata(DatabaseConfiguration database, long version, long modifiedDate) + implements + SimpleDiffable, + ToXContentObject { + + public static final ParseField DATABASE = new ParseField("database"); + public static final ParseField VERSION = new ParseField("version"); + public static final ParseField MODIFIED_DATE_MILLIS = new ParseField("modified_date_millis"); + public static final ParseField MODIFIED_DATE = new ParseField("modified_date"); + // later, things like this: + // static final ParseField LAST_SUCCESS = new ParseField("last_success"); + // static final ParseField LAST_FAILURE = new ParseField("last_failure"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "database_metadata", + true, + a -> { + DatabaseConfiguration database = (DatabaseConfiguration) a[0]; + return new DatabaseConfigurationMetadata(database, (long) a[1], (long) a[2]); + } + ); + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), DatabaseConfiguration::parse, DATABASE); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE_MILLIS); + } + + public static DatabaseConfigurationMetadata parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + public DatabaseConfigurationMetadata(StreamInput in) throws IOException { + this(new DatabaseConfiguration(in), in.readVLong(), in.readVLong()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // this is cluster state serialization, the id is implicit and doesn't need to included here + // (we'll be a in a json map where the id is the key) + builder.startObject(); + builder.field(VERSION.getPreferredName(), version); + builder.timeField(MODIFIED_DATE_MILLIS.getPreferredName(), MODIFIED_DATE.getPreferredName(), modifiedDate); + builder.field(DATABASE.getPreferredName(), database); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + database.writeTo(out); + out.writeVLong(version); + out.writeVLong(modifiedDate); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return SimpleDiffable.readDiffFrom(DatabaseConfigurationMetadata::new, in); + } +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DeleteDatabaseConfigurationAction.java 
b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DeleteDatabaseConfigurationAction.java new file mode 100644 index 0000000000000..843cc986c47e7 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DeleteDatabaseConfigurationAction.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteDatabaseConfigurationAction extends ActionType { + public static final DeleteDatabaseConfigurationAction INSTANCE = new DeleteDatabaseConfigurationAction(); + public static final String NAME = "cluster:admin/ingest/geoip/database/delete"; + + protected DeleteDatabaseConfigurationAction() { + super(NAME); + } + + public static class Request extends AcknowledgedRequest { + + private final String databaseId; + + public Request(StreamInput in) throws IOException { + super(in); + databaseId = in.readString(); + } + + public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String databaseId) { + super(masterNodeTimeout, ackTimeout); + this.databaseId = Objects.requireNonNull(databaseId, "id may not be null"); + } + + public String getDatabaseId() { + return this.databaseId; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(databaseId); + } + + @Override + public int hashCode() { + return databaseId.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(databaseId, other.databaseId); + } + } +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java new file mode 100644 index 0000000000000..546c0c2df821d --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.DATABASE; +import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.MODIFIED_DATE; +import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.MODIFIED_DATE_MILLIS; +import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.VERSION; + +public class GetDatabaseConfigurationAction extends ActionType { + public static final GetDatabaseConfigurationAction INSTANCE = new GetDatabaseConfigurationAction(); + public static final String NAME = "cluster:admin/ingest/geoip/database/get"; + + protected GetDatabaseConfigurationAction() { + super(NAME); + } + + public static class Request extends AcknowledgedRequest { + + private final String[] databaseIds; + + public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String... databaseIds) { + super(masterNodeTimeout, ackTimeout); + this.databaseIds = Objects.requireNonNull(databaseIds, "ids may not be null"); + } + + public Request(StreamInput in) throws IOException { + super(in); + databaseIds = in.readStringArray(); + } + + public String[] getDatabaseIds() { + return this.databaseIds; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(databaseIds); + } + + @Override + public int hashCode() { + return Arrays.hashCode(databaseIds); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Arrays.equals(databaseIds, other.databaseIds); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private final List databases; + + public Response(List databases) { + this.databases = List.copyOf(databases); // defensive copy + } + + public Response(StreamInput in) throws IOException { + this(in.readCollectionAsList(DatabaseConfigurationMetadata::new)); + } + + public List getDatabases() { + return this.databases; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray("databases"); + for (DatabaseConfigurationMetadata item : databases) { + DatabaseConfiguration database = item.database(); + builder.startObject(); + builder.field("id", database.id()); // serialize including the id -- this is get response serialization + builder.field(VERSION.getPreferredName(), item.version()); + builder.timeField(MODIFIED_DATE_MILLIS.getPreferredName(), MODIFIED_DATE.getPreferredName(), item.modifiedDate()); + builder.field(DATABASE.getPreferredName(), database); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + 
@Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(databases); + } + + @Override + public int hashCode() { + return Objects.hash(databases); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Response other = (Response) obj; + return databases.equals(other.databases); + } + } +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/PutDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/PutDatabaseConfigurationAction.java new file mode 100644 index 0000000000000..7bd5e1fa5cc68 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/PutDatabaseConfigurationAction.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class PutDatabaseConfigurationAction extends ActionType { + public static final PutDatabaseConfigurationAction INSTANCE = new PutDatabaseConfigurationAction(); + public static final String NAME = "cluster:admin/ingest/geoip/database/put"; + + protected PutDatabaseConfigurationAction() { + super(NAME); + } + + public static class Request extends AcknowledgedRequest { + + private final DatabaseConfiguration database; + + public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, DatabaseConfiguration database) { + super(masterNodeTimeout, ackTimeout); + this.database = database; + } + + public Request(StreamInput in) throws IOException { + super(in); + database = new DatabaseConfiguration(in); + } + + public DatabaseConfiguration getDatabase() { + return this.database; + } + + public static Request parseRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout, String id, XContentParser parser) { + return new Request(masterNodeTimeout, ackTimeout, DatabaseConfiguration.parse(parser, id)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + database.writeTo(out); + } + + @Override + public ActionRequestValidationException validate() { + return database.validate(); + } + + @Override + public int hashCode() { + return Objects.hash(database); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return database.equals(other.database); + } + + @Override + public String toString() { + return Strings.toString((b, p) -> b.field(database.id(), database)); + } + } +} diff --git 
a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestDeleteDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestDeleteDatabaseConfigurationAction.java new file mode 100644 index 0000000000000..4dc263224ad0a --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestDeleteDatabaseConfigurationAction.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getAckTimeout; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + +@ServerlessScope(Scope.INTERNAL) +public class RestDeleteDatabaseConfigurationAction extends BaseRestHandler { + + @Override + public List routes() { + return List.of(new Route(DELETE, "/_ingest/geoip/database/{id}")); + } + + @Override + public String getName() { + return "geoip_delete_database_configuration"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + final var req = new DeleteDatabaseConfigurationAction.Request( + getMasterNodeTimeout(request), + getAckTimeout(request), + request.param("id") + ); + return channel -> client.execute(DeleteDatabaseConfigurationAction.INSTANCE, req, new RestToXContentListener<>(channel)); + } +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestGetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestGetDatabaseConfigurationAction.java new file mode 100644 index 0000000000000..b237ceb638918 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestGetDatabaseConfigurationAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getAckTimeout; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + +@ServerlessScope(Scope.INTERNAL) +public class RestGetDatabaseConfigurationAction extends BaseRestHandler { + + @Override + public List routes() { + return List.of(new Route(GET, "/_ingest/geoip/database"), new Route(GET, "/_ingest/geoip/database/{id}")); + } + + @Override + public String getName() { + return "geoip_get_database_configuration"; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { + final var req = new GetDatabaseConfigurationAction.Request( + getMasterNodeTimeout(request), + getAckTimeout(request), + Strings.splitStringByCommaToArray(request.param("id")) + ); + return channel -> client.execute(GetDatabaseConfigurationAction.INSTANCE, req, new RestToXContentListener<>(channel)); + } +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestPutDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestPutDatabaseConfigurationAction.java new file mode 100644 index 0000000000000..62b01b930d5cd --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestPutDatabaseConfigurationAction.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction.Request; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getAckTimeout; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + +@ServerlessScope(Scope.INTERNAL) +public class RestPutDatabaseConfigurationAction extends BaseRestHandler { + + @Override + public List routes() { + return List.of(new Route(PUT, "/_ingest/geoip/database/{id}")); + } + + @Override + public String getName() { + return "geoip_put_database_configuration"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + final Request req; + try (var parser = request.contentParser()) { + req = PutDatabaseConfigurationAction.Request.parseRequest( + getMasterNodeTimeout(request), + getAckTimeout(request), + request.param("id"), + parser + ); + } + return channel -> client.execute(PutDatabaseConfigurationAction.INSTANCE, req, new RestToXContentListener<>(channel)); + } +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java new file mode 100644 index 0000000000000..43aacee956279 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
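The three handlers above expose PUT, GET, and DELETE under /_ingest/geoip/database/{id}. A minimal sketch (not part of the patch) of exercising them with the low-level REST client; the host, configuration id, database name, and account_id values are assumptions for illustration.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

class DatabaseConfigurationRestSketch {
    static void example() throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // create or update a configuration
            Request put = new Request("PUT", "/_ingest/geoip/database/my_db_config_1");
            put.setJsonEntity("{\"name\":\"GeoIP2-Domain\",\"maxmind\":{\"account_id\":\"123456\"}}");
            client.performRequest(put);
            // read it back (omitting the id returns all configurations)
            client.performRequest(new Request("GET", "/_ingest/geoip/database/my_db_config_1"));
            // and remove it again
            client.performRequest(new Request("DELETE", "/_ingest/geoip/database/my_db_config_1"));
        }
    }
}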
+ */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.SimpleBatchedExecutor; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.MasterServiceTaskQueue; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; +import org.elasticsearch.ingest.geoip.direct.DeleteDatabaseConfigurationAction.Request; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.HashMap; +import java.util.Map; + +public class TransportDeleteDatabaseConfigurationAction extends TransportMasterNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportDeleteDatabaseConfigurationAction.class); + + private static final SimpleBatchedExecutor DELETE_TASK_EXECUTOR = new SimpleBatchedExecutor<>() { + @Override + public Tuple executeTask(DeleteDatabaseConfigurationTask task, ClusterState clusterState) throws Exception { + return Tuple.tuple(task.execute(clusterState), null); + } + + @Override + public void taskSucceeded(DeleteDatabaseConfigurationTask task, Void unused) { + logger.trace("Updated cluster state for deletion of database configuration [{}]", task.databaseId); + task.listener.onResponse(AcknowledgedResponse.TRUE); + } + }; + + private final MasterServiceTaskQueue deleteDatabaseConfigurationTaskQueue; + + @Inject + public TransportDeleteDatabaseConfigurationAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + DeleteDatabaseConfigurationAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + Request::new, + indexNameExpressionResolver, + AcknowledgedResponse::readFrom, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + this.deleteDatabaseConfigurationTaskQueue = clusterService.createTaskQueue( + "delete-geoip-database-configuration-state-update", + Priority.NORMAL, + DELETE_TASK_EXECUTOR + ); + } + + @Override + protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) + throws Exception { + final String id = request.getDatabaseId(); + final IngestGeoIpMetadata geoIpMeta = state.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); + if (geoIpMeta.getDatabases().containsKey(id) == false) { + throw new ResourceNotFoundException("Database configuration not found: {}", id); + } + 
deleteDatabaseConfigurationTaskQueue.submitTask( + Strings.format("delete-geoip-database-configuration-[%s]", id), + new DeleteDatabaseConfigurationTask(listener, id), + null + ); + } + + private record DeleteDatabaseConfigurationTask(ActionListener listener, String databaseId) + implements + ClusterStateTaskListener { + + ClusterState execute(ClusterState currentState) throws Exception { + final IngestGeoIpMetadata geoIpMeta = currentState.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); + + logger.debug("deleting database configuration [{}]", databaseId); + Map databases = new HashMap<>(geoIpMeta.getDatabases()); + databases.remove(databaseId); + + Metadata currentMeta = currentState.metadata(); + return ClusterState.builder(currentState) + .metadata(Metadata.builder(currentMeta).putCustom(IngestGeoIpMetadata.TYPE, new IngestGeoIpMetadata(databases))) + .build(); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java new file mode 100644 index 0000000000000..a14a143e3f404 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class TransportGetDatabaseConfigurationAction extends TransportMasterNodeAction< + GetDatabaseConfigurationAction.Request, + GetDatabaseConfigurationAction.Response> { + + @Inject + public TransportGetDatabaseConfigurationAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + GetDatabaseConfigurationAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + GetDatabaseConfigurationAction.Request::new, + indexNameExpressionResolver, + GetDatabaseConfigurationAction.Response::new, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + } + + @Override + protected void masterOperation( + final Task task, + final GetDatabaseConfigurationAction.Request request, + final ClusterState state, + final ActionListener listener + ) { + final Set ids; + if (request.getDatabaseIds().length == 0) { + // if we did not ask for a specific name, then return all databases + ids = Set.of("*"); + } else { + ids = new LinkedHashSet<>(Arrays.asList(request.getDatabaseIds())); + } + + if (ids.size() > 1 && ids.stream().anyMatch(Regex::isSimpleMatchPattern)) { + throw new IllegalArgumentException( + "wildcard only supports a single value, please use comma-separated values or a single wildcard value" + ); + } + + final IngestGeoIpMetadata geoIpMeta = state.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); + List results = new ArrayList<>(); + + for (String id : ids) { + if (Regex.isSimpleMatchPattern(id)) { + for (Map.Entry entry : geoIpMeta.getDatabases().entrySet()) { + if (Regex.simpleMatch(id, entry.getKey())) { + results.add(entry.getValue()); + } + } + } else { + DatabaseConfigurationMetadata meta = geoIpMeta.getDatabases().get(id); + if (meta == null) { + listener.onFailure(new ResourceNotFoundException("database configuration not found: {}", id)); + return; + } else { + results.add(meta); + } + } + } + + listener.onResponse(new GetDatabaseConfigurationAction.Response(results)); + } + + @Override + protected ClusterBlockException checkBlock(GetDatabaseConfigurationAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java 
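As a usage note (not part of the patch), the GET transport action above expands the {id} parameter as follows: no id behaves like a single "*", comma-separated ids are looked up individually (a missing id yields a resource-not-found error), and a wildcard is only allowed on its own. A short sketch reusing the client and Request names from the previous example:

client.performRequest(new Request("GET", "/_ingest/geoip/database"));         // every stored configuration
client.performRequest(new Request("GET", "/_ingest/geoip/database/a,b"));     // exactly these ids; not-found if either is absent
client.performRequest(new Request("GET", "/_ingest/geoip/database/my_*"));    // ids matching the single wildcard
client.performRequest(new Request("GET", "/_ingest/geoip/database/a,my_*"));  // rejected: wildcard mixed with other values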
b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java new file mode 100644 index 0000000000000..540be68671d38 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.SimpleBatchedExecutor; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.MasterServiceTaskQueue; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; +import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction.Request; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.time.Instant; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +public class TransportPutDatabaseConfigurationAction extends TransportMasterNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportPutDatabaseConfigurationAction.class); + + private static final SimpleBatchedExecutor UPDATE_TASK_EXECUTOR = new SimpleBatchedExecutor<>() { + @Override + public Tuple executeTask(UpdateDatabaseConfigurationTask task, ClusterState clusterState) throws Exception { + return Tuple.tuple(task.execute(clusterState), null); + } + + @Override + public void taskSucceeded(UpdateDatabaseConfigurationTask task, Void unused) { + logger.trace("Updated cluster state for creation-or-update of database configuration [{}]", task.database.id()); + task.listener.onResponse(AcknowledgedResponse.TRUE); + } + }; + + private final MasterServiceTaskQueue updateDatabaseConfigurationTaskQueue; + + @Inject + public TransportPutDatabaseConfigurationAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + PutDatabaseConfigurationAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + Request::new, + indexNameExpressionResolver, + 
AcknowledgedResponse::readFrom, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + this.updateDatabaseConfigurationTaskQueue = clusterService.createTaskQueue( + "update-geoip-database-configuration-state-update", + Priority.NORMAL, + UPDATE_TASK_EXECUTOR + ); + } + + @Override + protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { + final String id = request.getDatabase().id(); + updateDatabaseConfigurationTaskQueue.submitTask( + Strings.format("update-geoip-database-configuration-[%s]", id), + new UpdateDatabaseConfigurationTask(listener, request.getDatabase()), + null + ); + } + + /** + * Returns 'true' if the database configuration is effectually the same, and thus can be a no-op update. + */ + static boolean isNoopUpdate(@Nullable DatabaseConfigurationMetadata existingDatabase, DatabaseConfiguration newDatabase) { + if (existingDatabase == null) { + return false; + } else { + return newDatabase.equals(existingDatabase.database()); + } + } + + static void validatePrerequisites(DatabaseConfiguration database, ClusterState state) { + // we need to verify that the database represents a unique file (name) among the various databases for this same provider + IngestGeoIpMetadata geoIpMeta = state.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); + + Optional sameName = geoIpMeta.getDatabases() + .values() + .stream() + .map(DatabaseConfigurationMetadata::database) + // .filter(d -> d.type().equals(database.type())) // of the same type (right now the type is always just 'maxmind') + .filter(d -> d.id().equals(database.id()) == false) // and a different id + .filter(d -> d.name().equals(database.name())) // but has the same name! + .findFirst(); + + sameName.ifPresent(d -> { + throw new IllegalArgumentException( + Strings.format("database [%s] is already being downloaded via configuration [%s]", database.name(), d.id()) + ); + }); + } + + private record UpdateDatabaseConfigurationTask(ActionListener listener, DatabaseConfiguration database) + implements + ClusterStateTaskListener { + + ClusterState execute(ClusterState currentState) throws Exception { + IngestGeoIpMetadata geoIpMeta = currentState.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); + + String id = database.id(); + final DatabaseConfigurationMetadata existingDatabase = geoIpMeta.getDatabases().get(id); + // double-check for no-op in the state update task, in case it was changed/reset in the meantime + if (isNoopUpdate(existingDatabase, database)) { + return currentState; + } + + validatePrerequisites(database, currentState); + + Map databases = new HashMap<>(geoIpMeta.getDatabases()); + databases.put( + id, + new DatabaseConfigurationMetadata( + database, + existingDatabase == null ? 
1 : existingDatabase.version() + 1, + Instant.now().toEpochMilli() + ) + ); + geoIpMeta = new IngestGeoIpMetadata(databases); + + if (existingDatabase == null) { + logger.debug("adding new database configuration [{}]", id); + } else { + logger.debug("updating existing database configuration [{}]", id); + } + + Metadata currentMeta = currentState.metadata(); + return ClusterState.builder(currentState) + .metadata(Metadata.builder(currentMeta).putCustom(IngestGeoIpMetadata.TYPE, geoIpMeta)) + .build(); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java new file mode 100644 index 0000000000000..58cb566165db2 --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java @@ -0,0 +1,538 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.DocWriteRequest.OpType; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.flush.FlushAction; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.index.TransportIndexAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.ingest.EnterpriseGeoIpTask; +import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration; +import org.elasticsearch.node.Node; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; + +import 
java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.PasswordAuthentication; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; + +import static org.elasticsearch.ingest.geoip.DatabaseNodeServiceTests.createClusterState; +import static org.elasticsearch.ingest.geoip.EnterpriseGeoIpDownloader.MAX_CHUNK_SIZE; +import static org.elasticsearch.tasks.TaskId.EMPTY_TASK_ID; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +public class EnterpriseGeoIpDownloaderTests extends ESTestCase { + + private HttpClient httpClient; + private ClusterService clusterService; + private ThreadPool threadPool; + private MockClient client; + private EnterpriseGeoIpDownloader geoIpDownloader; + + @Before + public void setup() throws IOException { + httpClient = mock(HttpClient.class); + when(httpClient.getBytes(any(), anyString())).thenReturn( + "e4a3411cdd7b21eaf18675da5a7f9f360d33c6882363b2c19c38715834c9e836 GeoIP2-City_20240709.tar.gz".getBytes(StandardCharsets.UTF_8) + ); + clusterService = mock(ClusterService.class); + threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(Settings.EMPTY, Set.of(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING)) + ); + ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of())); + when(clusterService.state()).thenReturn(state); + client = new MockClient(threadPool); + geoIpDownloader = new EnterpriseGeoIpDownloader( + client, + httpClient, + clusterService, + threadPool, + 1, + "", + "", + "", + EMPTY_TASK_ID, + Map.of(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + (input) -> new HttpClient.PasswordAuthenticationHolder("name", "password".toCharArray()) + ) { + { + EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams geoIpTaskParams = mock(EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams.class); + when(geoIpTaskParams.getWriteableName()).thenReturn(EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER); + init(new PersistentTasksService(clusterService, threadPool, client), null, null, 0); + } + }; + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testGetChunkEndOfStream() throws IOException { + byte[] chunk = EnterpriseGeoIpDownloader.getChunk(new InputStream() { + @Override + public int read() { + return -1; + } + }); + assertArrayEquals(new byte[0], chunk); + chunk = EnterpriseGeoIpDownloader.getChunk(new ByteArrayInputStream(new byte[0])); + assertArrayEquals(new byte[0], chunk); + } + + public void testGetChunkLessThanChunkSize() throws IOException { + ByteArrayInputStream is = new ByteArrayInputStream(new byte[] { 1, 2, 3, 4 }); + byte[] chunk = EnterpriseGeoIpDownloader.getChunk(is); + assertArrayEquals(new byte[] { 1, 2, 3, 4 }, chunk); + chunk = EnterpriseGeoIpDownloader.getChunk(is); + 
assertArrayEquals(new byte[0], chunk); + + } + + public void testGetChunkExactlyChunkSize() throws IOException { + byte[] bigArray = new byte[MAX_CHUNK_SIZE]; + for (int i = 0; i < MAX_CHUNK_SIZE; i++) { + bigArray[i] = (byte) i; + } + ByteArrayInputStream is = new ByteArrayInputStream(bigArray); + byte[] chunk = EnterpriseGeoIpDownloader.getChunk(is); + assertArrayEquals(bigArray, chunk); + chunk = EnterpriseGeoIpDownloader.getChunk(is); + assertArrayEquals(new byte[0], chunk); + } + + public void testGetChunkMoreThanChunkSize() throws IOException { + byte[] bigArray = new byte[MAX_CHUNK_SIZE * 2]; + for (int i = 0; i < MAX_CHUNK_SIZE * 2; i++) { + bigArray[i] = (byte) i; + } + byte[] smallArray = new byte[MAX_CHUNK_SIZE]; + System.arraycopy(bigArray, 0, smallArray, 0, MAX_CHUNK_SIZE); + ByteArrayInputStream is = new ByteArrayInputStream(bigArray); + byte[] chunk = EnterpriseGeoIpDownloader.getChunk(is); + assertArrayEquals(smallArray, chunk); + System.arraycopy(bigArray, MAX_CHUNK_SIZE, smallArray, 0, MAX_CHUNK_SIZE); + chunk = EnterpriseGeoIpDownloader.getChunk(is); + assertArrayEquals(smallArray, chunk); + chunk = EnterpriseGeoIpDownloader.getChunk(is); + assertArrayEquals(new byte[0], chunk); + } + + public void testGetChunkRethrowsIOException() { + expectThrows(IOException.class, () -> EnterpriseGeoIpDownloader.getChunk(new InputStream() { + @Override + public int read() throws IOException { + throw new IOException(); + } + })); + } + + public void testIndexChunksNoData() throws IOException { + client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + }); + client.addHandler( + RefreshAction.INSTANCE, + (RefreshRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + } + ); + + InputStream empty = new ByteArrayInputStream(new byte[0]); + assertEquals( + Tuple.tuple(0, "d41d8cd98f00b204e9800998ecf8427e"), + geoIpDownloader.indexChunks( + "test", + empty, + 0, + MessageDigests.sha256(), + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + 0 + ) + ); + } + + public void testIndexChunksMd5Mismatch() { + client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + }); + client.addHandler( + RefreshAction.INSTANCE, + (RefreshRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + } + ); + + IOException exception = expectThrows( + IOException.class, + () -> geoIpDownloader.indexChunks("test", new ByteArrayInputStream(new byte[0]), 0, MessageDigests.sha256(), "123123", 0) + ); + assertEquals( + "checksum mismatch, expected [123123], actual [e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855]", + exception.getMessage() + ); + } + + public void testIndexChunks() throws IOException { + byte[] bigArray = new byte[MAX_CHUNK_SIZE + 20]; + for (int i = 0; i 
< MAX_CHUNK_SIZE + 20; i++) { + bigArray[i] = (byte) i; + } + byte[][] chunksData = new byte[2][]; + chunksData[0] = new byte[MAX_CHUNK_SIZE]; + System.arraycopy(bigArray, 0, chunksData[0], 0, MAX_CHUNK_SIZE); + chunksData[1] = new byte[20]; + System.arraycopy(bigArray, MAX_CHUNK_SIZE, chunksData[1], 0, 20); + + AtomicInteger chunkIndex = new AtomicInteger(); + + client.addHandler(TransportIndexAction.TYPE, (IndexRequest request, ActionListener listener) -> { + int chunk = chunkIndex.getAndIncrement(); + assertEquals(OpType.CREATE, request.opType()); + assertThat(request.id(), Matchers.startsWith("test_" + (chunk + 15) + "_")); + assertEquals(XContentType.SMILE, request.getContentType()); + Map source = request.sourceAsMap(); + assertEquals("test", source.get("name")); + assertArrayEquals(chunksData[chunk], (byte[]) source.get("data")); + assertEquals(chunk + 15, source.get("chunk")); + listener.onResponse(mock(IndexResponse.class)); + }); + client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + }); + client.addHandler( + RefreshAction.INSTANCE, + (RefreshRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { EnterpriseGeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + } + ); + + InputStream big = new ByteArrayInputStream(bigArray); + assertEquals( + Tuple.tuple(17, "a67563dfa8f3cba8b8cff61eb989a749"), + geoIpDownloader.indexChunks( + "test", + big, + 15, + MessageDigests.sha256(), + "f2304545f224ff9ffcc585cb0a993723f911e03beb552cc03937dd443e931eab", + 0 + ) + ); + + assertEquals(2, chunkIndex.get()); + } + + public void testProcessDatabaseNew() throws IOException { + ByteArrayInputStream bais = new ByteArrayInputStream(new byte[0]); + when(httpClient.get(any(), any())).thenReturn(bais); + AtomicBoolean indexedChunks = new AtomicBoolean(false); + geoIpDownloader = new EnterpriseGeoIpDownloader( + client, + httpClient, + clusterService, + threadPool, + 1, + "", + "", + "", + EMPTY_TASK_ID, + Map.of(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + (input) -> new HttpClient.PasswordAuthenticationHolder("name", "password".toCharArray()) + ) { + @Override + protected void updateTimestamp(String name, GeoIpTaskState.Metadata metadata) { + fail(); + } + + @Override + Tuple indexChunks( + String name, + InputStream is, + int chunk, + MessageDigest digest, + String expectedMd5, + long start + ) { + assertSame(bais, is); + assertEquals(0, chunk); + indexedChunks.set(true); + return Tuple.tuple(11, expectedMd5); + } + + @Override + void updateTaskState() { + assertEquals(0, state.getDatabases().get("test.mmdb").firstChunk()); + assertEquals(10, state.getDatabases().get("test.mmdb").lastChunk()); + } + + @Override + void deleteOldChunks(String name, int firstChunk) { + assertEquals("test.mmdb", name); + assertEquals(0, firstChunk); + } + }; + + geoIpDownloader.setState(EnterpriseGeoIpTaskState.EMPTY); + PasswordAuthentication auth = new PasswordAuthentication("name", "password".toCharArray()); + String id = randomIdentifier(); + DatabaseConfiguration databaseConfiguration = new DatabaseConfiguration(id, "test", new DatabaseConfiguration.Maxmind("name")); + geoIpDownloader.processDatabase(auth, 
databaseConfiguration); + assertThat(indexedChunks.get(), equalTo(true)); + } + + public void testProcessDatabaseUpdate() throws IOException { + ByteArrayInputStream bais = new ByteArrayInputStream(new byte[0]); + when(httpClient.get(any(), any())).thenReturn(bais); + AtomicBoolean indexedChunks = new AtomicBoolean(false); + geoIpDownloader = new EnterpriseGeoIpDownloader( + client, + httpClient, + clusterService, + threadPool, + 1, + "", + "", + "", + EMPTY_TASK_ID, + Map.of(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + (input) -> new HttpClient.PasswordAuthenticationHolder("name", "password".toCharArray()) + ) { + @Override + protected void updateTimestamp(String name, GeoIpTaskState.Metadata metadata) { + fail(); + } + + @Override + Tuple indexChunks( + String name, + InputStream is, + int chunk, + MessageDigest digest, + String expectedMd5, + long start + ) { + assertSame(bais, is); + assertEquals(9, chunk); + indexedChunks.set(true); + return Tuple.tuple(1, expectedMd5); + } + + @Override + void updateTaskState() { + assertEquals(9, state.getDatabases().get("test.mmdb").firstChunk()); + assertEquals(10, state.getDatabases().get("test.mmdb").lastChunk()); + } + + @Override + void deleteOldChunks(String name, int firstChunk) { + assertEquals("test.mmdb", name); + assertEquals(9, firstChunk); + } + }; + + geoIpDownloader.setState(EnterpriseGeoIpTaskState.EMPTY.put("test.mmdb", new GeoIpTaskState.Metadata(0, 5, 8, "0", 0))); + PasswordAuthentication auth = new PasswordAuthentication("name", "password".toCharArray()); + String id = randomIdentifier(); + DatabaseConfiguration databaseConfiguration = new DatabaseConfiguration(id, "test", new DatabaseConfiguration.Maxmind("name")); + geoIpDownloader.processDatabase(auth, databaseConfiguration); + assertThat(indexedChunks.get(), equalTo(true)); + } + + public void testProcessDatabaseSame() throws IOException { + GeoIpTaskState.Metadata metadata = new GeoIpTaskState.Metadata( + 0, + 4, + 10, + "1", + 0, + "e4a3411cdd7b21eaf18675da5a7f9f360d33c6882363b2c19c38715834c9e836" + ); + EnterpriseGeoIpTaskState taskState = EnterpriseGeoIpTaskState.EMPTY.put("test.mmdb", metadata); + ByteArrayInputStream bais = new ByteArrayInputStream(new byte[0]); + when(httpClient.get(any(), any())).thenReturn(bais); + + geoIpDownloader = new EnterpriseGeoIpDownloader( + client, + httpClient, + clusterService, + threadPool, + 1, + "", + "", + "", + EMPTY_TASK_ID, + Map.of(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + (input) -> new HttpClient.PasswordAuthenticationHolder("name", "password".toCharArray()) + ) { + @Override + protected void updateTimestamp(String name, GeoIpTaskState.Metadata newMetadata) { + assertEquals(metadata, newMetadata); + assertEquals("test.mmdb", name); + } + + @Override + Tuple indexChunks( + String name, + InputStream is, + int chunk, + MessageDigest digest, + String expectedChecksum, + long start + ) { + fail(); + return Tuple.tuple(0, expectedChecksum); + } + + @Override + void updateTaskState() { + fail(); + } + + @Override + void deleteOldChunks(String name, int firstChunk) { + fail(); + } + }; + geoIpDownloader.setState(taskState); + PasswordAuthentication auth = new PasswordAuthentication("name", "password".toCharArray()); + String id = randomIdentifier(); + DatabaseConfiguration databaseConfiguration = new DatabaseConfiguration(id, "test", new DatabaseConfiguration.Maxmind("name")); + geoIpDownloader.processDatabase(auth, databaseConfiguration); 
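+ // no further assertions needed: the overridden indexChunks, updateTaskState, and deleteOldChunks hooks all fail() if invoked, so reaching this point verifies that an unchanged checksum triggers no re-download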
+ } + + public void testUpdateDatabasesWriteBlock() { + ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of())); + var geoIpIndex = state.getMetadata().getIndicesLookup().get(EnterpriseGeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName(); + state = ClusterState.builder(state) + .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)) + .build(); + when(clusterService.state()).thenReturn(state); + var e = expectThrows(ClusterBlockException.class, () -> geoIpDownloader.updateDatabases()); + assertThat( + e.getMessage(), + equalTo( + "index [" + + geoIpIndex + + "] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, " + + "index has read-only-allow-delete block];" + ) + ); + verifyNoInteractions(httpClient); + } + + public void testUpdateDatabasesIndexNotReady() throws IOException { + ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of()), true); + var geoIpIndex = state.getMetadata().getIndicesLookup().get(EnterpriseGeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName(); + state = ClusterState.builder(state) + .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)) + .build(); + when(clusterService.state()).thenReturn(state); + geoIpDownloader.updateDatabases(); + verifyNoInteractions(httpClient); + } + + private GeoIpTaskState.Metadata newGeoIpTaskStateMetadata(boolean expired) { + Instant lastChecked; + if (expired) { + lastChecked = Instant.now().minus(randomIntBetween(31, 100), ChronoUnit.DAYS); + } else { + lastChecked = Instant.now().minus(randomIntBetween(0, 29), ChronoUnit.DAYS); + } + return new GeoIpTaskState.Metadata(0, 0, 0, randomAlphaOfLength(20), lastChecked.toEpochMilli()); + } + + private static class MockClient extends NoOpClient { + + private final Map, BiConsumer>> handlers = new HashMap<>(); + + private MockClient(ThreadPool threadPool) { + super(threadPool); + } + + public void addHandler( + ActionType action, + BiConsumer> listener + ) { + handlers.put(action, listener); + } + + @SuppressWarnings("unchecked") + @Override + protected void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + if (handlers.containsKey(action)) { + BiConsumer> biConsumer = (BiConsumer>) handlers.get( + action + ); + biConsumer.accept(request, listener); + } else { + throw new IllegalStateException("unexpected action called [" + action.name() + "]"); + } + } + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskStateSerializationTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskStateSerializationTests.java new file mode 100644 index 0000000000000..a136f90780989 --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskStateSerializationTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class EnterpriseGeoIpTaskStateSerializationTests extends AbstractXContentSerializingTestCase { + @Override + protected GeoIpTaskState doParseInstance(XContentParser parser) throws IOException { + return GeoIpTaskState.fromXContent(parser); + } + + @Override + protected Writeable.Reader instanceReader() { + return GeoIpTaskState::new; + } + + @Override + protected GeoIpTaskState createTestInstance() { + GeoIpTaskState state = GeoIpTaskState.EMPTY; + int databaseCount = randomInt(20); + for (int i = 0; i < databaseCount; i++) { + state = state.put(randomAlphaOfLengthBetween(5, 10), createRandomMetadata()); + } + return state; + } + + @Override + protected GeoIpTaskState mutateInstance(GeoIpTaskState instance) { + Map databases = new HashMap<>(instance.getDatabases()); + switch (between(0, 2)) { + case 0: + String databaseName = randomValueOtherThanMany(databases::containsKey, () -> randomAlphaOfLengthBetween(5, 10)); + databases.put(databaseName, createRandomMetadata()); + return new GeoIpTaskState(databases); + case 1: + if (databases.size() > 0) { + String randomDatabaseName = databases.keySet().iterator().next(); + databases.put(randomDatabaseName, createRandomMetadata()); + } else { + databases.put(randomAlphaOfLengthBetween(5, 10), createRandomMetadata()); + } + return new GeoIpTaskState(databases); + case 2: + if (databases.size() > 0) { + String randomDatabaseName = databases.keySet().iterator().next(); + databases.remove(randomDatabaseName); + } else { + databases.put(randomAlphaOfLengthBetween(5, 10), createRandomMetadata()); + } + return new GeoIpTaskState(databases); + default: + throw new AssertionError("failure, got illegal switch case"); + } + } + + private GeoIpTaskState.Metadata createRandomMetadata() { + return new GeoIpTaskState.Metadata(randomLong(), randomInt(), randomInt(), randomAlphaOfLength(32), randomLong()); + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 6a83fe69473f7..06b2605bd6d41 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -426,6 +426,55 @@ void deleteOldChunks(String name, int firstChunk) { assertEquals(0, stats.getFailedDownloads()); } + public void testCleanDatabases() throws IOException { + ByteArrayInputStream bais = new ByteArrayInputStream(new byte[0]); + when(httpClient.get("http://a.b/t1")).thenReturn(bais); + + final AtomicInteger count = new AtomicInteger(0); + + geoIpDownloader = new GeoIpDownloader( + client, + httpClient, + clusterService, + threadPool, + Settings.EMPTY, + 1, + "", + "", + "", + EMPTY_TASK_ID, + Map.of(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true + ) { + @Override + void updateDatabases() throws IOException { + // noop + } + + @Override + void deleteOldChunks(String name, int firstChunk) { + count.incrementAndGet(); + assertEquals("test.mmdb", name); + 
assertEquals(21, firstChunk); + } + + @Override + void updateTaskState() { + // noop + } + }; + + geoIpDownloader.setState(GeoIpTaskState.EMPTY.put("test.mmdb", new GeoIpTaskState.Metadata(10, 10, 20, "md5", 20))); + geoIpDownloader.runDownloader(); + geoIpDownloader.runDownloader(); + GeoIpDownloaderStats stats = geoIpDownloader.getStatus(); + assertEquals(1, stats.getExpiredDatabases()); + assertEquals(2, count.get()); // somewhat surprising, not necessarily wrong + assertEquals(18, geoIpDownloader.state.getDatabases().get("test.mmdb").lastCheck()); // highly surprising, seems wrong + } + @SuppressWarnings("unchecked") public void testUpdateTaskState() { geoIpDownloader = new GeoIpDownloader( diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadataTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadataTests.java new file mode 100644 index 0000000000000..eca23cb13cd3d --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadataTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration; +import org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata; +import org.elasticsearch.test.AbstractChunkedSerializingTestCase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class IngestGeoIpMetadataTests extends AbstractChunkedSerializingTestCase { + @Override + protected IngestGeoIpMetadata doParseInstance(XContentParser parser) throws IOException { + return IngestGeoIpMetadata.fromXContent(parser); + } + + @Override + protected Writeable.Reader instanceReader() { + return IngestGeoIpMetadata::new; + } + + @Override + protected IngestGeoIpMetadata createTestInstance() { + return randomIngestGeoIpMetadata(); + } + + @Override + protected IngestGeoIpMetadata mutateInstance(IngestGeoIpMetadata instance) throws IOException { + Map databases = new HashMap<>(instance.getDatabases()); + switch (between(0, 2)) { + case 0 -> { + String databaseId = randomValueOtherThanMany(databases::containsKey, ESTestCase::randomIdentifier); + databases.put(databaseId, randomDatabaseConfigurationMetadata(databaseId)); + return new IngestGeoIpMetadata(databases); + } + case 1 -> { + if (databases.size() > 0) { + String randomDatabaseId = databases.keySet().iterator().next(); + databases.put(randomDatabaseId, randomDatabaseConfigurationMetadata(randomDatabaseId)); + } else { + String databaseId = randomIdentifier(); + databases.put(databaseId, randomDatabaseConfigurationMetadata(databaseId)); + } + return new IngestGeoIpMetadata(databases); + } + case 2 -> { + if (databases.size() > 0) { + String randomDatabaseId = databases.keySet().iterator().next(); + databases.remove(randomDatabaseId); + } else { + String databaseId = randomIdentifier(); + databases.put(databaseId, randomDatabaseConfigurationMetadata(databaseId)); + } + return new 
IngestGeoIpMetadata(databases); + } + default -> throw new AssertionError("failure, got illegal switch case"); + } + } + + private IngestGeoIpMetadata randomIngestGeoIpMetadata() { + Map databases = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 20); i++) { + String databaseId = randomIdentifier(); + databases.put(databaseId, randomDatabaseConfigurationMetadata(databaseId)); + } + return new IngestGeoIpMetadata(databases); + } + + private DatabaseConfigurationMetadata randomDatabaseConfigurationMetadata(String id) { + return new DatabaseConfigurationMetadata( + randomDatabaseConfiguration(id), + randomNonNegativeLong(), + randomPositiveTimeValue().millis() + ); + } + + private DatabaseConfiguration randomDatabaseConfiguration(String id) { + return new DatabaseConfiguration(id, randomAlphaOfLength(10), new DatabaseConfiguration.Maxmind(randomAlphaOfLength(10))); + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadataTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadataTests.java new file mode 100644 index 0000000000000..f035416d48068 --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationMetadataTests.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +import static org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration.MAXMIND_NAMES; +import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationTests.randomDatabaseConfiguration; + +public class DatabaseConfigurationMetadataTests extends AbstractXContentSerializingTestCase { + + private String id; + + @Override + protected DatabaseConfigurationMetadata doParseInstance(XContentParser parser) throws IOException { + return DatabaseConfigurationMetadata.parse(parser, id); + } + + @Override + protected DatabaseConfigurationMetadata createTestInstance() { + id = randomAlphaOfLength(5); + return randomDatabaseConfigurationMetadata(id); + } + + public static DatabaseConfigurationMetadata randomDatabaseConfigurationMetadata(String id) { + return new DatabaseConfigurationMetadata( + new DatabaseConfiguration(id, randomFrom(MAXMIND_NAMES), new DatabaseConfiguration.Maxmind(randomAlphaOfLength(5))), + randomNonNegativeLong(), + randomPositiveTimeValue().millis() + ); + } + + @Override + protected DatabaseConfigurationMetadata mutateInstance(DatabaseConfigurationMetadata instance) { + switch (between(0, 2)) { + case 0: + return new DatabaseConfigurationMetadata( + randomValueOtherThan(instance.database(), () -> randomDatabaseConfiguration(randomAlphaOfLength(5))), + instance.version(), + instance.modifiedDate() + ); + case 1: + return new DatabaseConfigurationMetadata( + instance.database(), + randomValueOtherThan(instance.version(), ESTestCase::randomNonNegativeLong), + instance.modifiedDate() + ); + case 2: + return 
new DatabaseConfigurationMetadata( + instance.database(), + instance.version(), + randomValueOtherThan(instance.modifiedDate(), () -> ESTestCase.randomPositiveTimeValue().millis()) + ); + default: + throw new AssertionError("failure, got illegal switch case"); + } + } + + @Override + protected Writeable.Reader instanceReader() { + return DatabaseConfigurationMetadata::new; + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationTests.java new file mode 100644 index 0000000000000..02c067561b49c --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfigurationTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration.Maxmind; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Set; + +import static org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration.MAXMIND_NAMES; + +public class DatabaseConfigurationTests extends AbstractXContentSerializingTestCase { + + private String id; + + @Override + protected DatabaseConfiguration doParseInstance(XContentParser parser) throws IOException { + return DatabaseConfiguration.parse(parser, id); + } + + @Override + protected DatabaseConfiguration createTestInstance() { + id = randomAlphaOfLength(5); + return randomDatabaseConfiguration(id); + } + + public static DatabaseConfiguration randomDatabaseConfiguration(String id) { + return new DatabaseConfiguration(id, randomFrom(MAXMIND_NAMES), new Maxmind(randomAlphaOfLength(5))); + } + + @Override + protected DatabaseConfiguration mutateInstance(DatabaseConfiguration instance) { + switch (between(0, 2)) { + case 0: + return new DatabaseConfiguration(instance.id() + randomAlphaOfLength(2), instance.name(), instance.maxmind()); + case 1: + return new DatabaseConfiguration( + instance.id(), + randomValueOtherThan(instance.name(), () -> randomFrom(MAXMIND_NAMES)), + instance.maxmind() + ); + case 2: + return new DatabaseConfiguration( + instance.id(), + instance.name(), + new Maxmind(instance.maxmind().accountId() + randomAlphaOfLength(2)) + ); + default: + throw new AssertionError("failure, got illegal switch case"); + } + } + + @Override + protected Writeable.Reader instanceReader() { + return DatabaseConfiguration::new; + } + + public void testValidateId() { + Set invalidIds = Set.of("-foo", "_foo", "foo,bar", "foo bar", "foo*bar", "foo.bar"); + for (String id : invalidIds) { + expectThrows(IllegalArgumentException.class, "expected exception for " + id, () -> DatabaseConfiguration.validateId(id)); + } + Set validIds = Set.of("f-oo", "f_oo", "foobar"); + for (String id : validIds) { + DatabaseConfiguration.validateId(id); + } + // Note: the code checks for byte length, but randomAlphoOfLength is only using characters in the ascii subset + String longId = randomAlphaOfLength(128); + 
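+ // 128 characters is one over the 127-byte maximum accepted by validateId, so this id must be rejected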
expectThrows(IllegalArgumentException.class, "expected exception for " + longId, () -> DatabaseConfiguration.validateId(longId)); + String longestAllowedId = randomAlphaOfLength(127); + DatabaseConfiguration.validateId(longestAllowedId); + String shortId = randomAlphaOfLengthBetween(1, 127); + DatabaseConfiguration.validateId(shortId); + expectThrows(IllegalArgumentException.class, "expected exception for empty string", () -> DatabaseConfiguration.validateId("")); + expectThrows(IllegalArgumentException.class, "expected exception for null string", () -> DatabaseConfiguration.validateId(null)); + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationActionTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationActionTests.java new file mode 100644 index 0000000000000..710c3ee23916d --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationActionTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.Map; + +public class TransportPutDatabaseConfigurationActionTests extends ESTestCase { + + public void testValidatePrerequisites() { + // Test that we reject two configurations with the same database name but different ids: + String name = randomAlphaOfLengthBetween(1, 50); + IngestGeoIpMetadata ingestGeoIpMetadata = randomIngestGeoIpMetadata(name); + ClusterState state = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder(Metadata.EMPTY_METADATA).putCustom(IngestGeoIpMetadata.TYPE, ingestGeoIpMetadata)) + .build(); + DatabaseConfiguration databaseConfiguration = randomDatabaseConfiguration(randomIdentifier(), name); + expectThrows( + IllegalArgumentException.class, + () -> TransportPutDatabaseConfigurationAction.validatePrerequisites(databaseConfiguration, state) + ); + + // Test that we do not reject two configurations with different database names: + String differentName = randomValueOtherThan(name, () -> randomAlphaOfLengthBetween(1, 50)); + DatabaseConfiguration databaseConfigurationForDifferentName = randomDatabaseConfiguration(randomIdentifier(), differentName); + TransportPutDatabaseConfigurationAction.validatePrerequisites(databaseConfigurationForDifferentName, state); + + // Test that we do not reject a configuration if none already exists: + TransportPutDatabaseConfigurationAction.validatePrerequisites(databaseConfiguration, ClusterState.EMPTY_STATE); + + // Test that we do not reject a configuration if one with the same database name AND id already exists: + DatabaseConfiguration databaseConfigurationSameNameSameId = ingestGeoIpMetadata.getDatabases() + .values() + .iterator() + .next() + .database(); + TransportPutDatabaseConfigurationAction.validatePrerequisites(databaseConfigurationSameNameSameId, state); + } + + private 
IngestGeoIpMetadata randomIngestGeoIpMetadata(String name) { + Map databases = new HashMap<>(); + String databaseId = randomIdentifier(); + databases.put(databaseId, randomDatabaseConfigurationMetadata(databaseId, name)); + return new IngestGeoIpMetadata(databases); + } + + private DatabaseConfigurationMetadata randomDatabaseConfigurationMetadata(String id, String name) { + return new DatabaseConfigurationMetadata( + randomDatabaseConfiguration(id, name), + randomNonNegativeLong(), + randomPositiveTimeValue().millis() + ); + } + + private DatabaseConfiguration randomDatabaseConfiguration(String id, String name) { + return new DatabaseConfiguration(id, name, new DatabaseConfiguration.Maxmind(randomAlphaOfLength(10))); + } +} diff --git a/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java b/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java index 58a6e3771b30d..0f0a0c998bd75 100644 --- a/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java +++ b/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java @@ -46,7 +46,12 @@ public class IngestGeoIpClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase .module("reindex") .module("ingest-geoip") .systemProperty("ingest.geoip.downloader.enabled.default", "true") + // sets the plain (geoip.elastic.co) downloader endpoint, which is used in these tests .setting("ingest.geoip.downloader.endpoint", () -> fixture.getAddress(), s -> useFixture) + // also sets the enterprise downloader maxmind endpoint, to make sure we do not accidentally hit the real endpoint from tests + // note: it's not important that the downloading actually work at this point -- the rest tests (so far) don't exercise + // the downloading code because of license reasons -- but if they did, then it would be important that we're hitting a fixture + .systemProperty("ingest.geoip.downloader.maxmind.endpoint.default", () -> fixture.getAddress(), s -> useFixture) .build(); @ClassRule diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml new file mode 100644 index 0000000000000..6809443fdfbc3 --- /dev/null +++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml @@ -0,0 +1,72 @@ +setup: + - requires: + cluster_features: ["geoip.downloader.database.configuration"] + reason: "geoip downloader database configuration APIs added in 8.15" + +--- +"Test adding, getting, and removing geoip databases": + - do: + ingest.put_geoip_database: + id: "my_database_1" + body: > + { + "name": "GeoIP2-City", + "maxmind": { + "account_id": "1234" + } + } + - match: { acknowledged: true } + + - do: + ingest.put_geoip_database: + id: "my_database_1" + body: > + { + "name": "GeoIP2-Country", + "maxmind": { + "account_id": "4321" + } + } + - match: { acknowledged: true } + + - do: + ingest.put_geoip_database: + id: "my_database_2" + body: > + { + "name": "GeoIP2-City", + "maxmind": { + "account_id": "1234" + } + } + - match: { acknowledged: true } + + - do: + ingest.get_geoip_database: + id: "my_database_1" + - length: { databases: 1 } + - match: { databases.0.id: "my_database_1" } + - gte: { databases.0.modified_date_millis: 0 } + - match: { 
databases.0.database.name: "GeoIP2-Country" } + - match: { databases.0.database.maxmind.account_id: "4321" } + + - do: + ingest.get_geoip_database: {} + - length: { databases: 2 } + + - do: + ingest.get_geoip_database: + id: "my_database_1,my_database_2" + - length: { databases: 2 } + + - do: + ingest.delete_geoip_database: + id: "my_database_1" + + - do: + ingest.get_geoip_database: {} + - length: { databases: 1 } + - match: { databases.0.id: "my_database_2" } + - gte: { databases.0.modified_date_millis: 0 } + - match: { databases.0.database.name: "GeoIP2-City" } + - match: { databases.0.database.maxmind.account_id: "1234" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json new file mode 100644 index 0000000000000..ef6dc94dd27a6 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json @@ -0,0 +1,31 @@ +{ + "ingest.delete_geoip_database":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html", + "description":"Deletes a geoip database configuration" + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_ingest/geoip/database/{id}", + "methods":[ + "DELETE" + ], + "parts":{ + "id":{ + "type":"list", + "description":"A comma-separated list of geoip database configurations to delete" + } + } + } + ] + }, + "params":{ + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_geoip_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_geoip_database.json new file mode 100644 index 0000000000000..96f028e2e5251 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_geoip_database.json @@ -0,0 +1,37 @@ +{ + "ingest.get_geoip_database":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html", + "description":"Returns geoip database configuration." 
+ }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_ingest/geoip/database", + "methods":[ + "GET" + ] + }, + { + "path":"/_ingest/geoip/database/{id}", + "methods":[ + "GET" + ], + "parts":{ + "id":{ + "type":"list", + "description":"A comma-separated list of geoip database configurations to get; use `*` to get all geoip database configurations" + } + } + } + ] + }, + "params":{ + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json new file mode 100644 index 0000000000000..07f9e37740279 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json @@ -0,0 +1,35 @@ +{ + "ingest.put_geoip_database":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/TODO.html", + "description":"Puts the configuration for a geoip database to be downloaded" + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_ingest/geoip/database/{id}", + "methods":[ + "PUT" + ], + "parts":{ + "id":{ + "type":"string", + "description":"The id of the database configuration" + } + } + } + ] + }, + "params":{ + }, + "body":{ + "description":"The database configuration definition", + "required":true + } + } +} diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index cd17a435388d3..bf5f88d264612 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -432,6 +432,7 @@ org.elasticsearch.indices.IndicesFeatures, org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures, org.elasticsearch.index.mapper.MapperFeatures, + org.elasticsearch.ingest.IngestGeoIpFeatures, org.elasticsearch.search.SearchFeatures, org.elasticsearch.script.ScriptFeatures, org.elasticsearch.search.retriever.RetrieversFeatures, @@ -465,4 +466,5 @@ org.elasticsearch.serverless.shardhealth, org.elasticsearch.serverless.apifiltering; exports org.elasticsearch.lucene.spatial; + } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index ff50d1513d28a..7ac9b1a3f8013 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -209,6 +209,7 @@ static TransportVersion def(int id) { public static final TransportVersion ML_INFERENCE_GOOGLE_VERTEX_AI_RERANKING_ADDED = def(8_700_00_0); public static final TransportVersion VERSIONED_MASTER_NODE_REQUESTS = def(8_701_00_0); public static final TransportVersion ML_INFERENCE_AMAZON_BEDROCK_ADDED = def(8_702_00_0); + public static final TransportVersion ENTERPRISE_GEOIP_DOWNLOADER_BACKPORT_8_15 = def(8_702_00_1); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java b/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java new file mode 100644 index 0000000000000..a55553c8f0fd7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +/** + * As a relatively minor hack, this class holds the string constant that defines both the id + * and the name of the task for the new ip geolocation database downloader feature. It also provides the + * PersistentTaskParams that are necessary to start the task and to run it. + *
+ * Defining this in Elasticsearch itself gives us a reasonably tidy version of things where we don't + * end up with strange inter-module dependencies. It's not ideal, but it works fine. + */ +public final class EnterpriseGeoIpTask { + + private EnterpriseGeoIpTask() { + // utility class + } + + public static final String ENTERPRISE_GEOIP_DOWNLOADER = "enterprise-geoip-downloader"; + public static final NodeFeature GEOIP_DOWNLOADER_DATABASE_CONFIGURATION = new NodeFeature("geoip.downloader.database.configuration"); + + public static class EnterpriseGeoIpTaskParams implements PersistentTaskParams { + + public static final ObjectParser PARSER = new ObjectParser<>( + ENTERPRISE_GEOIP_DOWNLOADER, + true, + EnterpriseGeoIpTaskParams::new + ); + + public EnterpriseGeoIpTaskParams() {} + + public EnterpriseGeoIpTaskParams(StreamInput in) {} + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return ENTERPRISE_GEOIP_DOWNLOADER; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER_BACKPORT_8_15; + } + + @Override + public void writeTo(StreamOutput out) {} + + public static EnterpriseGeoIpTaskParams fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public boolean equals(Object obj) { + return obj instanceof EnterpriseGeoIpTaskParams; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java b/server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java new file mode 100644 index 0000000000000..0d989ad9f7ab2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Set; + +import static org.elasticsearch.ingest.EnterpriseGeoIpTask.GEOIP_DOWNLOADER_DATABASE_CONFIGURATION; + +public class IngestGeoIpFeatures implements FeatureSpecification { + public Set getFeatures() { + return Set.of(GEOIP_DOWNLOADER_DATABASE_CONFIGURATION); + } +} diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index a9d9c6a5a1938..054eb7964e9ef 100644 --- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -14,6 +14,7 @@ org.elasticsearch.rest.RestFeatures org.elasticsearch.indices.IndicesFeatures org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures org.elasticsearch.index.mapper.MapperFeatures +org.elasticsearch.ingest.IngestGeoIpFeatures org.elasticsearch.search.SearchFeatures org.elasticsearch.search.retriever.RetrieversFeatures org.elasticsearch.script.ScriptFeatures diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index b2b19f14cfd4b..42c42c309f931 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -1825,9 +1825,9 @@ public void testBulkRequestExecution() throws Exception { for (int i = 0; i < numRequest; i++) { IndexRequest indexRequest = new IndexRequest("_index").id("_id").setPipeline(pipelineId).setFinalPipeline("_none"); indexRequest.source(xContentType, "field1", "value1"); - boolean shouldListExecutedPiplines = randomBoolean(); - executedPipelinesExpected.add(shouldListExecutedPiplines); - indexRequest.setListExecutedPipelines(shouldListExecutedPiplines); + boolean shouldListExecutedPipelines = randomBoolean(); + executedPipelinesExpected.add(shouldListExecutedPipelines); + indexRequest.setListExecutedPipelines(shouldListExecutedPipelines); bulkRequest.add(indexRequest); } diff --git a/test/fixtures/geoip-fixture/src/main/java/fixture/geoip/EnterpriseGeoIpHttpFixture.java b/test/fixtures/geoip-fixture/src/main/java/fixture/geoip/EnterpriseGeoIpHttpFixture.java new file mode 100644 index 0000000000000..9a5205f66d1f4 --- /dev/null +++ b/test/fixtures/geoip-fixture/src/main/java/fixture/geoip/EnterpriseGeoIpHttpFixture.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package fixture.geoip; + +import com.sun.net.httpserver.HttpServer; + +import org.elasticsearch.common.hash.MessageDigests; +import org.junit.rules.ExternalResource; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.security.MessageDigest; + +/** + * This fixture is used to simulate a maxmind-provided server for downloading maxmind geoip database files from the + * EnterpriseGeoIpDownloader. It can be used by integration tests so that they don't actually hit maxmind servers. + */ +public class EnterpriseGeoIpHttpFixture extends ExternalResource { + + private final Path source; + private final boolean enabled; + private final String[] databaseTypes; + private HttpServer server; + + /* + * The values in databaseTypes must be in DatabaseConfiguration.MAXMIND_NAMES, and must be one of the databases copied in the + * copyFiles method of thisi class. + */ + public EnterpriseGeoIpHttpFixture(boolean enabled, String... databaseTypes) { + this.enabled = enabled; + this.databaseTypes = databaseTypes; + try { + this.source = Files.createTempDirectory("source"); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public String getAddress() { + return "http://" + server.getAddress().getHostString() + ":" + server.getAddress().getPort() + "/"; + } + + @Override + protected void before() throws Throwable { + if (enabled) { + copyFiles(); + this.server = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + + // for expediency reasons, it is handy to have this test fixture be able to serve the dual purpose of actually stubbing + // out the download protocol for downloading files from maxmind (see the looped context creation after this stanza), as + // we as to serve an empty response for the geoip.elastic.co service here + this.server.createContext("/", exchange -> { + String response = "[]"; // an empty json array + exchange.sendResponseHeaders(200, response.length()); + try (OutputStream os = exchange.getResponseBody()) { + os.write(response.getBytes(StandardCharsets.UTF_8)); + } + }); + + // register the file types for the download fixture + for (String databaseType : databaseTypes) { + createContextForEnterpriseDatabase(databaseType); + } + + server.start(); + } + } + + private void createContextForEnterpriseDatabase(String databaseType) { + this.server.createContext("/" + databaseType + "/download", exchange -> { + exchange.sendResponseHeaders(200, 0); + if (exchange.getRequestURI().toString().contains("sha256")) { + MessageDigest sha256 = MessageDigests.sha256(); + try (InputStream inputStream = GeoIpHttpFixture.class.getResourceAsStream("/geoip-fixture/" + databaseType + ".tgz")) { + sha256.update(inputStream.readAllBytes()); + } + exchange.getResponseBody() + .write( + (MessageDigests.toHexString(sha256.digest()) + " " + databaseType + "_20240709.tar.gz").getBytes( + StandardCharsets.UTF_8 + ) + ); + } else { + try ( + OutputStream outputStream = exchange.getResponseBody(); + InputStream inputStream = GeoIpHttpFixture.class.getResourceAsStream("/geoip-fixture/" + databaseType + ".tgz") + ) { + inputStream.transferTo(outputStream); + } + } + exchange.getResponseBody().close(); + }); + } + + @Override + protected void after() { + if (enabled) { 
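+ // the server is only created and started in before() when the fixture is enabled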
+ server.stop(0); + } + } + + private void copyFiles() throws Exception { + for (String databaseType : databaseTypes) { + Files.copy( + GeoIpHttpFixture.class.getResourceAsStream("/geoip-fixture/GeoIP2-City.tgz"), + source.resolve(databaseType + ".tgz"), + StandardCopyOption.REPLACE_EXISTING + ); + } + } +} diff --git a/test/fixtures/geoip-fixture/src/main/resources/geoip-fixture/GeoIP2-City.tgz b/test/fixtures/geoip-fixture/src/main/resources/geoip-fixture/GeoIP2-City.tgz new file mode 100644 index 0000000000000000000000000000000000000000..76dd40000f1324a6d5857bb163f70aabf6bb9ccd GIT binary patch literal 6377 zcmYj#2Q*yW7w;elf`lN1NEjqhql+?HqD3Z%kLV#e)j@2s=$KKtz7-h1vjYdFKMLR8@2Y#?OYDefL7ZKLf2H@nJ^ zJvoV1~{kg?q*UduV)^=P-zr9+}Zi2-3{aHN?6-hi1C(Pd5%8pPa2hUKi+D}YjE^-c653l zp@23pHJwSFC=XwmmZ_5&leqi!Pik$B_XpFK)K~brzM>TCztS(H5}wVhjilqvPLcgR zZ}ljG>ycxer@^PvQ=v=%h8@=$cVd2Ezwc^>PQZsJJWpN~Njn^_6=-6;({<&7Oh$~m zwA?PwAx|o8_HX~)w4ajO8>)^gs9zJe%>lJyNf>h1F3*n6mAg_!rK@q^djzTxK#Y$5 z>2o=2@;;iLpnd0Feug@l4nG6ta15%}j3Ln1{9J7?lIW*-*X~ocLzq&t(`^NmzhR{< zpLV1Kgp7kn(*x9BG*Eg%1x_*8(*UnOB^@ zYr*JUN)W z$WD0o1m~x)3s`p_?m=i#Oa$~Xb@Uv0gitnD|h`k;eAzKn;`TG*?@Va9uZ1O(##K_2iW9NDk5cy+*ReXo{^F4D8Omcfb|k z(Rc2nB6Y*Gxh8sDw_U%}mG!O6x5J- z!+7y!pD~M5%t^5l9^tf>_{d`?R~7ay`Ze|0g-t}g|N7cj^BXhGHB2%nC0tM5WTkjc zNztDt)(0_l;loa9>*78RClYG2T5fzxQeVy)HEYAxo#I^S+4k9#o5**_cGgnqYuuTT z*{C(HZQ0w0cbyrkCg@9L*6zT1=f4rJ`U_5fza}{lxxXZQT|R=62$8$-enlgz`5BD- z{DJ0YHV+C9W)C`#JK9!^=UfJ@i+hL&&#?9E_5Fkpvk>KC(qfWg#)cc&4I$0cG%Jqt z99izaF~`z4DODo52j>aoZ4?1Siu9 z_7-JIJAvSFCm6_MZM#*pum2w(0aLAq$m-R ze7`LI$|gEd%MV|?$iRT76Dss=6;SP`_z*UL}SG{}dzFUv5;wZE;~semQ$ zMJV>CgTR`9L*U9yc&h%S%-q_bO#dQ_{rpjlz*@94r|G{s&rCX_|3ueZ%?$J9R2Ae$ zhG}V-Fh4PSzDT`XFB)1O+CKTQhOLI;WfR6luj*ME_Yq3NlmfvR1U zk_!vfI=Y5WvySCN@GWP^S@a5t4<0V3>=zmE+L>!V!@uYBdPp!>priYDJg)f=sn$99 z>aIJWJp#dg@PUTqhH~Fak4VN{3qgFpj)Tj=7s;1BZ=8rhE*OQsXLWpE%Rntw>e4z( z?`qX|H3g0yGKW;N?%u=Eg}7@)MtY^RnrAi7^6isoo?f>6t&`SF*Emuy%|++$2%}Kl zEab>CeZCcmK#+4`~zRUH~*s zP_E^=%(tyBVvS=10KQC=Yr%aQx@-mlUv5kQU_d1k`xE8L7qbO)*+Ktj9*lQ=O5kgP zJQL0?Ex22tp}cC1K`{_ICQR~jJWgUja~_Q`Sgr6SL_yC`u0pQ;TvPzO8Hn8jwxUZ6 zaXZ*M(BrRM$D~KRweXv#Pv*6tnW(ZynhvBHsItJzC3lSa%Bg_Jx#(2-3NjjYq~#s} zJ%>#&G0o88UhuF>gM=2WyQDRYyZ?o>PC`l=I}$;X#*jPX-EHkq*BERR$vZzqDPBL@=BGn@3);_{wt* zqjFa(BX%)rC!O;#t3hS|+PAzrGB*aYOb=ttJBo@k27s6klQ+VziuWhUC-M&}ENJ53SGc=a=# zo2RemDPmwDT+Ua#HNoOJhYmUMo*v>UHd%Bgg?%fb&QI8IaAg3BgU`}UlPChqX;|m1 z<{i=<3XkpCFo8W_*=th*{FOASgR~UX4vxQgUbg^?CL2pn78&I!{$u^oyvsa2lpBC6 zP@PAE)#xIcuf#&p6zMK+wK((cZ*8ol-HR2zxt;XpLN6r-giGFFF&UhaJijeF=u{S8 zHrU)(t~t_qpvzqyY@#{tE`q_SK-aM^y$?Ny37HJe1&8H>x~RvYNe5x%09*;U$zSs0 zZg#43o?DmROKU}${72ts)HW%8lw#fj`EP*@t`0IrpC4H*>zYZb)C7Y;x;8>?)!o1| zxWvbXicUa}97+4^9yqPkj^lY6iifiXRK2@pV2zR-=b+{@A?MH=gh_Dt0J-x(e)IJ$ zQbpjU#0#hDzJw>2pbi3u5@-h!ZGrsXc>W8@kC@+lv3BqGqoHZ#ND1Quz}MwK^GaQrKj7aVs`9f^EJ&6eTWanVDG8&)q#}d_`1PgT(xm<0ST;(8sws5|Ahlzvn;^SKA3hfkrn8ioUl3M3QB>k!>*;J4H&Tfy zQhB~^z7SPB4h$nD-sb5H!rpU3$_q14VC?enzc%f!F^0g%1;)v88e~`OnRv6>sF1rd z-ld?fU|HRv964F+GvzbIvGpp{QrLO4SYsxnVh-X?;!bu9IVJ^fD(`N=7``1q_GERN z)Z!_>R|NY6YsmXmsA<_vtK+4)0Eirk9M$;^59$-fJ@LJ3djfmx$!=82AQ`xNDUdtM zZH?c3uPc0S+j^d9ULW#Wl1LIT3z@w#OFBz2OEUXDQN-6)S%PrPqPxqzO*XGG&prQI zPK5H_7)+jdz)SP5vrr6AzY>H2D{fC4M*?0+GDx2sbF~t3rFi7&GWW4pGN==;J|mTM zDN(b#J$Y@MJ@Fcn1WAP?N6Py+`qQ`h3%F4CeLNZvE0xZuBKwI_iibKJV(^ z0GLvG2)S@BaI2GBCDiFzK|n5C2^x?|9igd@OA1%; z=UqJX4yPBuO|XCD{-5WQ<>zQEf?WFw=3}M(I=ayZ7uX4&ZA-*=R+e+k02s5^gsslV z9+(nmu6|E~y%6J0Y9j?UU)5X-EZ`;MIaT&vFk#(2qXH`2)L`?R+ufh6IVOd~Hj zH*trb!Nbo6LTchiK%bUFdjL*F-QWK^C&?XCVFs4yUn0Nu0N&D-6!A!Lu!%q$)}fPg 
z`1dRgX%G8p)ZaCqwLkoq$^B0VE4DWplnMH(Pl;&nH1Ts7EdXyKuWvB>4f_2I*@5BD zu$17CmiFIuB%lHhT-ktIg11oX!6prJ!HEP>s{4v$;AdMTsM`?eZr7VIjMJr31bEY~ zGfDb(|Ip0Ts1be8vWBugIWI=)S%_J@vk?8}sKb_CCP=dRvhetmO@6{a;Ktkgy@&*a zTA2*W_TyvN$Qoj7V=5tJfHx?~TbSeLzN=E=&JJF0!lYeHzV^m&w3dQn>#zJ^g(YIL zA>#Ee&W!O`jNd;1y)te*^$gkNiPG;LSh4?9-(7H?V|EB*`@3=ui+&`ndPL`ge2|Cuu}7TQQ2QK zo9!qIt9@n|x`&s-vq)-%K1d~}w66+~U^E`tK=`F@9K0)N?zPD0ku@6_edZ=8(%?IH zJLhbsJOs~gPbb!5*-no9_$P6EakXR)z9wlKEqFl4H^Kbfr|?JiYq4nY)8%el4f0Dx zSIE#mjtI`ot*#n35<(32P)rxTR9LY_#yj3AQE8Iq__=)Vs)y*uij;!sXZxXsE!$4r2>%Ln-=sZ#{Txxgs?9Nx51FJjx76^Py=bxGtJ;wXUD+RVDEuskheiy4SZPEp6d}snIBuCcQ$E!b!Ba7&9AOGQ zi8qSg7J2G0CUDRn*7_QykLZpyI9KD_TU6%{c~d)X^DL0Rnu8wNL?>KpW;c+Q!`5}R zI?TvaI4oT*=qW1e$kIJ(|H1TPtQ8cnQ63(8O2*)0v3 z5Mtc4?LN^kqv@g;9)H7~d;At?uUe^H#>{+6votSRSX_A_Xe?EYjHs0LO52ev&E9Vd zTXk6(%}Ed>DIOt#wG?Xn`YjAg6%+s9o@nMIwtN3I0Q*&w>KGJc&$u~JlI_Gn`C;E3 zf8_mvDgLX!V`ouHP54=qY307>ufEBl_HSlU2Q_3LXjpP*1S0A}SkJzMR0^$c*tnYy`HvxTmn&rnKeAUF(R>=MT^aSZyzqSFWEW7 z)-~$~F+tOo%!QhqF%}IuKpNk+p5W56t&CHpgc&Y*{pkNil zbK+MiIg8NRQhK#Q#$mAmHxDnTCknr&TaO%e|2mqEj7~Z{W^u<=4LCSDKI)q(Vv4+D zWhL9N+PlpV-B8z)tzg|Db!Tkc@sB-XE@Jw+xP8T^LgRN< zn1M~<*hN>Px24AZT^RIpa%4J;-&OC`a(w^5;SbF{1*t+gE*Wq0vNvP4?(Xk*|J3AF z%bN`keN^nTs;0Jadu5&H=r)S%pn>&oc2teGvzn3h1@e9cgbLcdEr*OPgNtmR;h+=-%n%9mC(1-8+6$SCY{SA3wnbJcu`}3guNWaTrT1k#u9~BLs0sor(Nu zg(pj*$-@ID$GQr_5w)Kir`+EoTUG~pPLMjK7``TJE6Rwmoz_gxMQXhXrGcAg(&>G; z#h8XATE_8;1*3uN=L$ou%*=E$*?eZpdP<)9QGJtYHhXo`Y`03%v@L73E%~PwBv~U( z3@ls!7QcC7I$!uo&nTmfAve|DN-UuK#NnT}&IQb$yyWNtTVez!sF?=PbBF`>`%Y<# zdGIq9pN)jhNPR)wdBlwPk)xwT)~u(d1p1Pv?RBnM05KgmEP$P+qC6pr+<~xD{vMM zrQHUlBpqDDb6O|pWWu5c0$S3U@{62O<{dl6%D*=_Sc>-U2bJsSuO%Al$&Rh{q)ygi zD)Vz1oxnd|4Q9`tE;u}@oQ_VQ+ggg%w|OJeFV_=XuhP=%>Mlw%%+g7#=kFHxiP+re z8Ko(79Ng0C6W?Rb`g8pmb7NAuoRsL6d&Q|GrSVV@+Z{}x1}vrXO3QQI%f^ zpIt=Ysu44$1=A#VV|BRpy|uAm?;cw>Gy&l+K_s6-uBRbAFRJSwPo@~xEf%7essSSY z7+4({n3z99m49%^n>)EcqdFX1#%kAVeeW8dhoV{g1G+r95Yj(jCU)8dvKCA+?%l~T zwRS7hCdxrGV*%*ZEVi>(knQ}e$)9XjPdZx(&IHeL(LDo~2U}5e%dRaG@`!$uN_+k7 zsOnF77IFx}M$(0MVp&^1ZlbBM@aI-#_`_r+Mhs)1f9S5!;0o=23Yl6$2RX(ODaa|1 zz)Dy9j=A8&4~x@5UZMA$ODzGxuceHY;w5&jd?w9F;`q&8H@x#=iEdQppGE~fewz=D zrlq-C--3$_+QmZNdSJxAJir*d%ffSp_G#Vh*3K}TYEBCLIg#YWEmpgp1cVgp3)wp& zH_kn)fu?By|IWs_20-*Vj@JMHM={;N>@d%QTlRRYP=IKoMzD+|xi{B+Pg!Z%>#KSi zEPb+%(JL3JG_+?18zv=I0muKABy^EeeU`E=lc^$;S>lOh-3;CL_2=v-kXb&g-S26+ VZT`Op;4}kha-@ERLR2A;{{wE_+r createComponents(PluginServices services) { + enterpriseGeoIpDownloaderLicenseListener = new EnterpriseGeoIpDownloaderLicenseListener( + services.client(), + services.clusterService(), + services.threadPool(), + getLicenseState() + ); + enterpriseGeoIpDownloaderLicenseListener.init(); + return List.of(enterpriseGeoIpDownloaderLicenseListener); + } +} diff --git a/x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListener.java b/x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListener.java new file mode 100644 index 0000000000000..d6e6f57f10976 --- /dev/null +++ b/x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListener.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.geoip; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.ingest.EnterpriseGeoIpTask.EnterpriseGeoIpTaskParams; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseStateListener; +import org.elasticsearch.license.LicensedFeature; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.xpack.core.XPackField; + +import java.util.Objects; + +import static org.elasticsearch.ingest.EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER; + +public class EnterpriseGeoIpDownloaderLicenseListener implements LicenseStateListener, ClusterStateListener { + private static final Logger logger = LogManager.getLogger(EnterpriseGeoIpDownloaderLicenseListener.class); + // Note: This custom type is GeoIpMetadata.TYPE, but that class is not exposed to this plugin + static final String INGEST_GEOIP_CUSTOM_METADATA_TYPE = "ingest_geoip"; + + private final PersistentTasksService persistentTasksService; + private final ClusterService clusterService; + private final XPackLicenseState licenseState; + private static final LicensedFeature.Momentary ENTERPRISE_GEOIP_FEATURE = LicensedFeature.momentary( + null, + XPackField.ENTERPRISE_GEOIP_DOWNLOADER, + License.OperationMode.PLATINUM + ); + private volatile boolean licenseIsValid = false; + private volatile boolean hasIngestGeoIpMetadata = false; + + protected EnterpriseGeoIpDownloaderLicenseListener( + Client client, + ClusterService clusterService, + ThreadPool threadPool, + XPackLicenseState licenseState + ) { + this.persistentTasksService = new PersistentTasksService(clusterService, threadPool, client); + this.clusterService = clusterService; + this.licenseState = licenseState; + } + + @UpdateForV9 // use MINUS_ONE once that means no timeout + private static final TimeValue MASTER_TIMEOUT = TimeValue.MAX_VALUE; + private volatile boolean licenseStateListenerRegistered; + + public void init() { + listenForLicenseStateChanges(); + clusterService.addListener(this); + } + + void listenForLicenseStateChanges() { + assert licenseStateListenerRegistered == false : "listenForLicenseStateChanges() should only be called once"; + licenseStateListenerRegistered = true; + licenseState.addListener(this); + } + + @Override + public void licenseStateChanged() { + licenseIsValid = ENTERPRISE_GEOIP_FEATURE.checkWithoutTracking(licenseState); + maybeUpdateTaskState(clusterService.state()); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + hasIngestGeoIpMetadata = event.state().metadata().custom(INGEST_GEOIP_CUSTOM_METADATA_TYPE) != null; + final boolean ingestGeoIpCustomMetaChangedInEvent = event.metadataChanged() + && 
event.changedCustomMetadataSet().contains(INGEST_GEOIP_CUSTOM_METADATA_TYPE); + final boolean masterNodeChanged = Objects.equals( + event.state().nodes().getMasterNode(), + event.previousState().nodes().getMasterNode() + ) == false; + /* + * We don't want to potentially start the task on every cluster state change, so only maybeUpdateTaskState if this cluster change + * event involved the modification of custom geoip metadata OR a master node change + */ + if (ingestGeoIpCustomMetaChangedInEvent || (masterNodeChanged && hasIngestGeoIpMetadata)) { + maybeUpdateTaskState(event.state()); + } + } + + private void maybeUpdateTaskState(ClusterState state) { + // We should only start/stop task from single node, master is the best as it will go through it anyway + if (state.nodes().isLocalNodeElectedMaster()) { + if (licenseIsValid) { + if (hasIngestGeoIpMetadata) { + ensureTaskStarted(); + } + } else { + ensureTaskStopped(); + } + } + } + + private void ensureTaskStarted() { + assert licenseIsValid : "Task should never be started without valid license"; + persistentTasksService.sendStartRequest( + ENTERPRISE_GEOIP_DOWNLOADER, + ENTERPRISE_GEOIP_DOWNLOADER, + new EnterpriseGeoIpTaskParams(), + MASTER_TIMEOUT, + ActionListener.wrap(r -> logger.debug("Started enterprise geoip downloader task"), e -> { + Throwable t = e instanceof RemoteTransportException ? ExceptionsHelper.unwrapCause(e) : e; + if (t instanceof ResourceAlreadyExistsException == false) { + logger.error("failed to create enterprise geoip downloader task", e); + } + }) + ); + } + + private void ensureTaskStopped() { + ActionListener> listener = ActionListener.wrap( + r -> logger.debug("Stopped enterprise geoip downloader task"), + e -> { + Throwable t = e instanceof RemoteTransportException ? ExceptionsHelper.unwrapCause(e) : e; + if (t instanceof ResourceNotFoundException == false) { + logger.error("failed to remove enterprise geoip downloader task", e); + } + } + ); + persistentTasksService.sendRemoveRequest(ENTERPRISE_GEOIP_DOWNLOADER, MASTER_TIMEOUT, listener); + } +} diff --git a/x-pack/plugin/geoip-enterprise-downloader/src/test/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListenerTests.java b/x-pack/plugin/geoip-enterprise-downloader/src/test/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListenerTests.java new file mode 100644 index 0000000000000..5a5aacd392f3c --- /dev/null +++ b/x-pack/plugin/geoip-enterprise-downloader/src/test/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListenerTests.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.geoip; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.license.License; +import org.elasticsearch.license.TestUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.internal.XPackLicenseStatus; +import org.elasticsearch.node.Node; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.RemovePersistentTaskAction; +import org.elasticsearch.persistent.StartPersistentTaskAction; +import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.Map; +import java.util.UUID; + +import static org.elasticsearch.xpack.geoip.EnterpriseGeoIpDownloaderLicenseListener.INGEST_GEOIP_CUSTOM_METADATA_TYPE; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class EnterpriseGeoIpDownloaderLicenseListenerTests extends ESTestCase { + + private ThreadPool threadPool; + + @Before + public void setup() { + threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testAllConditionsMetOnStart() { + // Should never start if not master node, even if all other conditions have been met + final XPackLicenseState licenseState = getAlwaysValidLicense(); + ClusterService clusterService = createClusterService(true, false); + TaskStartAndRemoveMockClient client = new TaskStartAndRemoveMockClient(threadPool, true, false); + EnterpriseGeoIpDownloaderLicenseListener listener = new EnterpriseGeoIpDownloaderLicenseListener( + client, + clusterService, + threadPool, + licenseState + ); + listener.init(); + listener.licenseStateChanged(); + listener.clusterChanged(new ClusterChangedEvent("test", createClusterState(true, true), clusterService.state())); + client.assertTaskStartHasBeenCalled(); + } + + public void testLicenseChanges() { + final TestUtils.UpdatableLicenseState licenseState = new TestUtils.UpdatableLicenseState(); + licenseState.update(new XPackLicenseStatus(License.OperationMode.TRIAL, false, "")); + ClusterService clusterService = createClusterService(true, true); + TaskStartAndRemoveMockClient client = new TaskStartAndRemoveMockClient(threadPool, false, true); + EnterpriseGeoIpDownloaderLicenseListener listener = new EnterpriseGeoIpDownloaderLicenseListener( + client, + clusterService, + threadPool, + licenseState + ); + listener.init(); + listener.licenseStateChanged(); + listener.clusterChanged(new ClusterChangedEvent("test", 
clusterService.state(), clusterService.state())); + client.expectStartTask = true; + client.expectRemoveTask = false; + licenseState.update(new XPackLicenseStatus(License.OperationMode.TRIAL, true, "")); + listener.licenseStateChanged(); + client.assertTaskStartHasBeenCalled(); + client.expectStartTask = false; + client.expectRemoveTask = true; + licenseState.update(new XPackLicenseStatus(License.OperationMode.TRIAL, false, "")); + listener.licenseStateChanged(); + client.assertTaskRemoveHasBeenCalled(); + } + + public void testDatabaseChanges() { + final XPackLicenseState licenseState = getAlwaysValidLicense(); + ClusterService clusterService = createClusterService(true, false); + TaskStartAndRemoveMockClient client = new TaskStartAndRemoveMockClient(threadPool, false, false); + EnterpriseGeoIpDownloaderLicenseListener listener = new EnterpriseGeoIpDownloaderLicenseListener( + client, + clusterService, + threadPool, + licenseState + ); + listener.init(); + listener.licenseStateChanged(); + listener.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), clusterService.state())); + // add a geoip database, so the task ought to be started: + client.expectStartTask = true; + listener.clusterChanged(new ClusterChangedEvent("test", createClusterState(true, true), clusterService.state())); + client.assertTaskStartHasBeenCalled(); + // Now we remove the geoip databases. The task ought to just be left alone. + client.expectStartTask = false; + client.expectRemoveTask = false; + listener.clusterChanged(new ClusterChangedEvent("test", createClusterState(true, false), clusterService.state())); + } + + public void testMasterChanges() { + // Should never start if not master node, even if all other conditions have been met + final XPackLicenseState licenseState = getAlwaysValidLicense(); + ClusterService clusterService = createClusterService(false, false); + TaskStartAndRemoveMockClient client = new TaskStartAndRemoveMockClient(threadPool, false, false); + EnterpriseGeoIpDownloaderLicenseListener listener = new EnterpriseGeoIpDownloaderLicenseListener( + client, + clusterService, + threadPool, + licenseState + ); + listener.init(); + listener.licenseStateChanged(); + listener.clusterChanged(new ClusterChangedEvent("test", createClusterState(false, true), clusterService.state())); + client.expectStartTask = true; + listener.clusterChanged(new ClusterChangedEvent("test", createClusterState(true, true), clusterService.state())); + } + + private XPackLicenseState getAlwaysValidLicense() { + return new XPackLicenseState(() -> 0); + } + + private ClusterService createClusterService(boolean isMasterNode, boolean hasGeoIpDatabases) { + ClusterService clusterService = mock(ClusterService.class); + ClusterState state = createClusterState(isMasterNode, hasGeoIpDatabases); + when(clusterService.state()).thenReturn(state); + return clusterService; + } + + private ClusterState createClusterState(boolean isMasterNode, boolean hasGeoIpDatabases) { + String indexName = randomAlphaOfLength(5); + Index index = new Index(indexName, UUID.randomUUID().toString()); + IndexMetadata.Builder idxMeta = IndexMetadata.builder(index.getName()) + .settings(indexSettings(IndexVersion.current(), 1, 0).put("index.uuid", index.getUUID())); + String nodeId = ESTestCase.randomAlphaOfLength(8); + DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().add(DiscoveryNodeUtils.create(nodeId)).localNodeId(nodeId); + if (isMasterNode) { + discoveryNodesBuilder.masterNodeId(nodeId); + } + ClusterState.Builder 
clusterStateBuilder = ClusterState.builder(new ClusterName("name")); + if (hasGeoIpDatabases) { + PersistentTasksCustomMetadata tasksCustomMetadata = new PersistentTasksCustomMetadata(1L, Map.of()); + clusterStateBuilder.metadata(Metadata.builder().putCustom(INGEST_GEOIP_CUSTOM_METADATA_TYPE, tasksCustomMetadata).put(idxMeta)); + } + return clusterStateBuilder.nodes(discoveryNodesBuilder).build(); + } + + private static class TaskStartAndRemoveMockClient extends NoOpClient { + + boolean expectStartTask; + boolean expectRemoveTask; + private boolean taskStartCalled = false; + private boolean taskRemoveCalled = false; + + private TaskStartAndRemoveMockClient(ThreadPool threadPool, boolean expectStartTask, boolean expectRemoveTask) { + super(threadPool); + this.expectStartTask = expectStartTask; + this.expectRemoveTask = expectRemoveTask; + } + + @Override + protected void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + if (action.equals(StartPersistentTaskAction.INSTANCE)) { + if (expectStartTask) { + taskStartCalled = true; + } else { + fail("Should not start task"); + } + } else if (action.equals(RemovePersistentTaskAction.INSTANCE)) { + if (expectRemoveTask) { + taskRemoveCalled = true; + } else { + fail("Should not remove task"); + } + } else { + throw new IllegalStateException("unexpected action called [" + action.name() + "]"); + } + } + + void assertTaskStartHasBeenCalled() { + assertTrue(taskStartCalled); + } + + void assertTaskRemoveHasBeenCalled() { + assertTrue(taskRemoveCalled); + } + } +} diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index c04d531ce3fdb..2e92b646a45e4 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -39,6 +39,9 @@ public class Constants { "cluster:admin/indices/dangling/find", "cluster:admin/indices/dangling/import", "cluster:admin/indices/dangling/list", + "cluster:admin/ingest/geoip/database/delete", + "cluster:admin/ingest/geoip/database/get", + "cluster:admin/ingest/geoip/database/put", "cluster:admin/ingest/pipeline/delete", "cluster:admin/ingest/pipeline/get", "cluster:admin/ingest/pipeline/put", From f047c417cfe366f67e6306a8400f34b6f4bea1f1 Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Fri, 19 Jul 2024 11:57:12 +0300 Subject: [PATCH 085/406] Correct force merge disk space requirements (#111066) (#111087) Correct force merge disk space requirements --- docs/reference/indices/forcemerge.asciidoc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/reference/indices/forcemerge.asciidoc b/docs/reference/indices/forcemerge.asciidoc index 1d473acbd5d48..6eacaac5e7b2a 100644 --- a/docs/reference/indices/forcemerge.asciidoc +++ b/docs/reference/indices/forcemerge.asciidoc @@ -89,8 +89,9 @@ one at a time. If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel. Force merge makes the storage for the shard being merged temporarily -increase, up to double its size in case `max_num_segments` parameter is set to -`1`, as all segments need to be rewritten into a new one. 
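(For context on the wording corrected in this hunk: the paragraph refers to the force merge API that this file documents. A minimal request that hits the single-segment case it describes would look like the following, where `my-index-000001` is only a placeholder index name:

    POST /my-index-000001/_forcemerge?max_num_segments=1

With `max_num_segments` set to `1`, the corrected text below says to plan for free disk space of up to roughly three times the shard's current size while all of its segments are rewritten into one.)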
+increase, as it may require free space up to triple its size in case +`max_num_segments` parameter is set to `1`, to rewrite all segments into a new +one. [[forcemerge-api-path-params]] ==== {api-path-parms-title} From 0e571fd51a2f4c4e57c3a23a6baf76b1775ef32b Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Fri, 19 Jul 2024 11:24:56 +0200 Subject: [PATCH 086/406] ESQL: Skip retrofitted tests (#111019) (#111088) (cherry picked from commit 548aea56ce48e95cf79a201d75a784bb9f17d45c) # Conflicts: # muted-tests.yml # x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec # x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java --- .../testFixtures/src/main/resources/enrich.csv-spec | 13 ++++++------- .../testFixtures/src/main/resources/eval.csv-spec | 2 +- .../testFixtures/src/main/resources/keep.csv-spec | 10 +++++----- .../testFixtures/src/main/resources/stats.csv-spec | 2 +- 4 files changed, 13 insertions(+), 14 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec index cf32e028b23bc..ab2ddb84ed969 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec @@ -69,7 +69,7 @@ ROW left = "left", foo = "foo", client_ip = "172.21.0.5", env = "env", right = " left:keyword | client_ip:keyword | env:keyword | right:keyword | foo:keyword ; -shadowingSubfields +shadowingSubfields#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] required_capability: enrich_load FROM addresses | KEEP city.country.continent.planet.name, city.country.name, city.name @@ -84,8 +84,7 @@ United States of America | South San Francisco | San Francisco Int'l Japan | Tokyo | null ; -shadowingSubfieldsLimit0 -required_capability: enrich_load +shadowingSubfieldsLimit0#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] FROM addresses | KEEP city.country.continent.planet.name, city.country.name, city.name | EVAL city.name = REPLACE(city.name, "San Francisco", "South San Francisco") @@ -135,7 +134,7 @@ ROW left = "left", airport = "Zurich Airport ZRH", city = "Zürich", middle = "m left:keyword | city:keyword | middle:keyword | right:keyword | airport:text | region:text | city_boundary:geo_shape ; -shadowingInternal +shadowingInternal#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] required_capability: enrich_load ROW city = "Zürich" | ENRICH city_names ON city WITH x = airport, x = region @@ -145,7 +144,7 @@ city:keyword | x:text Zürich | Bezirk Zürich ; -shadowingInternalImplicit +shadowingInternalImplicit#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] required_capability: enrich_load ROW city = "Zürich" | ENRICH city_names ON city WITH airport = region @@ -155,7 +154,7 @@ city:keyword | airport:text Zürich | Bezirk Zürich ; -shadowingInternalImplicit2 +shadowingInternalImplicit2#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] required_capability: enrich_load ROW city = "Zürich" | ENRICH city_names ON city WITH airport, airport = region @@ -165,7 +164,7 @@ city:keyword | airport:text Zürich | Bezirk Zürich ; -shadowingInternalImplicit3 +shadowingInternalImplicit3#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] required_capability: enrich_load ROW city = "Zürich" | ENRICH city_names ON city WITH airport = region, airport diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index 87f54fbf0f174..770358e5120da 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -15,7 +15,7 @@ left:keyword | right:keyword | x:integer left | right | 1 ; -shadowingSubfields +shadowingSubfields#[skip:-8.13.3,reason:fixed in 8.13] FROM addresses | KEEP city.country.continent.planet.name, city.country.name, city.name | EVAL city.country.continent.planet.name = to_upper(city.country.continent.planet.name) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec index bcce35eb81e0f..6bc534a9fd918 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec @@ -540,7 +540,7 @@ c:i 1 ; -shadowingInternal +shadowingInternal#[skip:-8.13.3,reason:fixed in 8.13] FROM employees | SORT emp_no ASC | KEEP last_name, emp_no, last_name @@ -552,7 +552,7 @@ emp_no:integer | last_name:keyword 10002 | Simmel ; -shadowingInternalWildcard +shadowingInternalWildcard#[skip:-8.13.3,reason:fixed in 8.13] FROM employees | SORT emp_no ASC | KEEP last*name, emp_no, last*name, first_name, last*, gender, last* @@ -564,7 +564,7 @@ emp_no:integer | first_name:keyword | gender:keyword | last_name:keyword 10002 | Bezalel | F | Simmel ; -shadowingInternalWildcardAndExplicit +shadowingInternalWildcardAndExplicit#[skip:-8.13.3,reason:fixed in 8.13] FROM employees | SORT emp_no ASC | KEEP last*name, emp_no, last_name, first_name, last*, languages, last_name, gender, last*name @@ -576,7 +576,7 @@ emp_no:integer | first_name:keyword | languages:integer | last_name:keyword | ge 10002 | Bezalel | 5 | Simmel | F ; -shadowingSubfields +shadowingSubfields#[skip:-8.13.3,reason:fixed in 8.13] FROM addresses | KEEP city.country.continent.planet.name, city.country.continent.name, city.country.name, city.name, city.country.continent.planet.name | SORT city.name @@ -588,7 +588,7 @@ North America | United States of America | San Francisco Asia | Japan | Tokyo | Earth ; -shadowingSubfieldsWildcard +shadowingSubfieldsWildcard#[skip:-8.13.3,reason:fixed in 8.13] FROM addresses | KEEP *name, city.country.continent.planet.name | SORT city.name diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 9558cf235b847..b2080b54b981c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -1829,7 +1829,7 @@ x:integer 10001 ; -shadowingInternalWithGroup +shadowingInternalWithGroup#[skip:-8.14.1,reason:implemented in 8.14] FROM employees | STATS x = MAX(emp_no), x = MIN(emp_no) BY x = gender | SORT x ASC From 130fc55cc5dc140c8c810fed79185f9d9874c067 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 19 Jul 2024 15:39:30 +0200 Subject: [PATCH 087/406] Update Gradle wrapper to 8.9 (#110109) (#111095) (cherry picked from commit cb0ecb86df8990e70a7185360794b0640e28dffd) --- .../gradle/wrapper/gradle-wrapper.properties | 4 +- .../src/main/resources/minimumGradleVersion | 2 +- convert-deps.groovy | 24 + generate-version-catalog.groovy | 319 ++ gradle/wrapper/gradle-wrapper.jar | Bin 43453 -> 43504 bytes gradle/wrapper/gradle-wrapper.properties | 4 +- gradlew | 5 +- gradlew.bat | 2 + 
jdks.log | 3146 +++++++++++++++++ list-plain-deps.groovy | 68 + .../gradle/wrapper/gradle-wrapper.properties | 4 +- versions.log | 595 ++++ 12 files changed, 4165 insertions(+), 8 deletions(-) create mode 100644 convert-deps.groovy create mode 100644 generate-version-catalog.groovy create mode 100644 jdks.log create mode 100644 list-plain-deps.groovy create mode 100644 versions.log diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index 515ab9d5f1822..efe2ff3449216 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=f8b4f4772d302c8ff580bc40d0f56e715de69b163546944f787c87abf209c961 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.8-all.zip +distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion index 83ea3179ddacc..f7b1c8ff61774 100644 --- a/build-tools-internal/src/main/resources/minimumGradleVersion +++ b/build-tools-internal/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -8.8 \ No newline at end of file +8.9 \ No newline at end of file diff --git a/convert-deps.groovy b/convert-deps.groovy new file mode 100644 index 0000000000000..23b2a8dafb496 --- /dev/null +++ b/convert-deps.groovy @@ -0,0 +1,24 @@ +import groovy.io.FileType +import java.nio.file.* +import java.nio.charset.StandardCharsets +import java.util.regex.Pattern + +// Define the base directory to start the search +def baseDir = new File('/Users/rene/dev/elastic/elasticsearch/plugins') + def pattern = Pattern.compile(/(\w+)\s+['"](\w[^'"]+):([^'"]+):([^'"]+)['"]/) + +// Define the pattern to match dependencies +def dependencyPattern = ~/(\w+)\s+['"](\w[^'"]+):([^'"]+):([^'"]+)['"]/ + +baseDir.eachFileRecurse(FileType.FILES) { file -> + if (file.name.endsWith('.gradle')) { + def content = file.text + def newContent = content.replaceAll(dependencyPattern) { match, config, group, name, version -> + def libName = "${name.replaceAll('-', '.')}".toLowerCase() + "$config libs.${libName}" + } + file.text = newContent + } +} + +println "Dependency patterns replaced successfully." 
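To make the intent of the replacement closure in `convert-deps.groovy` above concrete, here is the kind of rewrite it performs; the sample dependency coordinates are invented for illustration and are not taken from the build:

// Hypothetical input line found in a *.gradle file:
//   api 'org.apache.lucene:lucene-core:9.10.0'
// The closure keeps only the configuration name, drops the group and version,
// turns '-' in the artifact name into '.' and lowercases it, so the matched
// text is replaced with a version-catalog accessor:
//   api libs.lucene.core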
\ No newline at end of file diff --git a/generate-version-catalog.groovy b/generate-version-catalog.groovy new file mode 100644 index 0000000000000..fe4890e725599 --- /dev/null +++ b/generate-version-catalog.groovy @@ -0,0 +1,319 @@ +import java.nio.file.* +import java.nio.charset.StandardCharsets +import java.util.regex.Pattern + +REPO_ROOT = "/Users/rene/dev/elastic/elasticsearch/plugins" +VERSION_PROPS = REPO_ROOT + "/../build-tools-internal/version.properties" + +def parseGradleFiles(Path directory) { + def pattern = Pattern.compile(/(\w+)\s+['"](\w[^'"]+):([^'"]+):([^'"]+)['"]/) + def dependencies = [] + + Files.walk(directory).each { path -> + if (Files.isRegularFile(path) && path.toString().endsWith('.gradle') && path.toString().contains("plugins/examples") == false){ + def lines = Files.readAllLines(path, StandardCharsets.UTF_8) + lines.each { line -> + def matcher = pattern.matcher(line) + if (matcher.find()) { + def configuration = matcher.group(1) + def group = matcher.group(2) + def name = matcher.group(3) + def version = matcher.group(4) + dependencies << [file: path.toString(), configuration: configuration, group: group, name: name, version: version] + } + } + } + } + return dependencies +} + +String convertToVersionCatalogEntry(def dependencies) { + Set versions = new TreeSet<>() + Set entries = new TreeSet<>() + +} + +def resolveVersion(Properties props, String versionString) { + println "Resolving version: ${versionString}" + if(versionString.startsWith("\${versions.")) { + def versionId = versionString.substring(versionString.indexOf('.') + 1, versionString.indexOf('}')) + if(props.containsKey(versionId)) { + return props.getProperty(versionId) + } else { + println "unknown version ${versionString} found in build.gradle file. Please add it to the version.properties file." + return versionId + } + } + + return versionString +} + + +Properties loadVersionProperties() { + def properties = new Properties() + def file = new File(VERSION_PROPS) + if (!file.exists()) { + println "The properties file '${VERSION_PROPS}' does not exist." + return null + } + file.withInputStream { stream -> + properties.load(stream) + } + properties.each { key, value -> + println "Loaded version property: ${key} = ${value}" + } + return properties +} + +def convertToCamelCase(String input) { + def parts = input.split('-') + def camelCaseString = parts[0] + parts.tail().each { part -> + // for now skip camel casing + //camelCaseString += part.capitalize() + camelCaseString += part + } + return camelCaseString +} + +String calculateVersionRef(String libraryName, Map versionCatalog, Properties properties, String version) { + // String versionRefName = convertToCamelCase(libraryName) + String versionRefName = libraryName + + if(versionCatalog.containsKey(versionRefName)) { + def existingMajor = versionCatalog[libraryName].split("\\.")[0] as int + def newMajor = version.split("\\.")[0] as int + println "existingMajor: ${existingMajor}, newMajor: ${newMajor}" + + if(newMajor > existingMajor) { + return versionRefName + newMajor + } + } + return versionRefName +} + +def checkOptimizations(Map versionCatalog, Properties versionProperties) { + def simplifications = [:] + versionCatalog.each { givenKey, givenVersion -> + def simpleKey = givenKey.contains("-") ? 
givenKey.split('-')[0] : givenKey + def candidates = versionCatalog.findAll {k, v -> givenKey != k && k.startsWith("${simpleKey}-")} + if(candidates.size() == 0 && versionProperties[simpleKey] != null) { + assert versionProperties[simpleKey] == givenVersion + simplifications[givenKey] = simpleKey + } else { + candidates.each {candidateKey , candidateVersion -> + if(candidateVersion == givenVersion) { + simplifications[candidateKey] = simpleKey + } + } + } + + if(simplifications[givenKey] == null){ + def converted = convertToCamelCase(givenKey) + + if(givenKey != converted) { + simplifications[givenKey] = converted + } + } + } + + return simplifications +} + + +def parseValue(value) { + if (value.startsWith('"') && value.endsWith('"')) { + return value[1..-2] // String value + } else if (value ==~ /\d+/) { + return value.toInteger() // Integer value + } else if (value ==~ /\d+\.\d+/) { + return value.toDouble() // Double value + } else if (value == 'true' || value == 'false') { + return value.toBoolean() // Boolean value + } else if (value.startsWith('[') && value.endsWith(']')) { + return value[1..-2].split(',').collect { parseValue(it.trim()) } // Array value + } else { + return value // Default to string if not matched + } +} + +def parseTomlFile(filePath) { + def tomlMap = [:] + def currentSection = null + def file = new File(filePath) + + file.eachLine { line -> + line = line.trim() + + if (line.startsWith('#') || line.isEmpty()) { + // Skip comments and empty lines + return + } + + if (line.startsWith('[') && line.endsWith(']')) { + // New section + currentSection = line[1..-2] + tomlMap[currentSection] = [:] + } else if (line.contains('=')) { + // Key-value pair + def (key, value) = line.split('=', 2).collect { it.trim() } + value = parseValue(value) + if (currentSection) { + tomlMap[currentSection][key] = value + } else { + tomlMap[key] = value + } + } + } + + return tomlMap +} + +def main() { + // def directoryPath = System.console().readLine('Enter the directory path to search for *.gradle files: ').trim() + // def directory = Paths.get("directoryPath") + def directory = Paths.get(REPO_ROOT) + + if (!Files.exists(directory) || !Files.isDirectory(directory)) { + println "The directory '${directoryPath}' does not exist or is not a directory." 
+ return + } + + def dependencies = parseGradleFiles(directory) + + def librariesCatalog = [:] + def versionsCatalog = [:] + + Properties versionProperties = loadVersionProperties() + println "Version Properties: ${versionProperties.contains('junit')}" + if (dependencies) { + def depsByFile = dependencies.groupBy {it.file} + depsByFile.each { file, deps -> + println "File: ${file}" + deps.each { dep -> + def effectiveVersion = resolveVersion(versionProperties, dep.version) + def versionRefName = calculateVersionRef(dep.name, versionsCatalog, versionProperties, effectiveVersion) + versionsCatalog.put(versionRefName, effectiveVersion) + depLibraryEntry = [group: dep.group, name: dep.name, version:versionRefName] + println "\"${dep.group}:${dep.name}:${dep.version}\" -> \"${depLibraryEntry}\"" + if(librariesCatalog.containsKey(versionRefName)) { + assert librariesCatalog[versionRefName] == depLibraryEntry + } else { + librariesCatalog.put(versionRefName, depLibraryEntry) + } + } + + println "" + } + + println "libraries Catalog versions" + + librariesCatalog.each { key, value -> + println "${key} = ${value}" + } + + println "Version Catalog libraries" + versionsCatalog.each { key, value -> + println "${key} = ${value}" + } + println "Found ${dependencies.size()} dependencies in ${depsByFile.size()} files." + + } else { + println "No dependencies found." + } + + def versionOptimizations = checkOptimizations(versionsCatalog, versionProperties) + + versionOptimizations.each { given, simplified -> + println "$given -> $simplified" + println "${versionsCatalog[simplified]}" + if(versionsCatalog[simplified] == null) { + versionsCatalog[simplified] = versionsCatalog[given] + } + versionsCatalog.remove(given) + } + + librariesCatalog.each { key, value -> + def simplified = versionOptimizations[key] + if(simplified != null) { + librariesCatalog[key].version = simplified + } + } + + println "\n\nversions: " + versionsCatalog.sort().each { key, value -> + println "${key} = \"${value}\"" + } + + librariesCatalog.sort() + println "\n\nlibraries: " + librariesCatalog.sort().each { k, v -> + println "${k} = { group = \"${v['group']}\", name = \"${v['name']}\", version.ref = \"${v['version']}\" } " + } + + // Example usage + def tomlFilePath = '/Users/rene/dev/elastic/elasticsearch/gradle/versions.toml' + def parsedToml = parseTomlFile(tomlFilePath) + + // Access parsed data + existingVersions = parsedToml['versions'] + +// println "\n\nExisting versions:" +// existingVersions.forEach { key, value -> +// println "${key} = ${value}" +// } + +// existingLibs = parsedToml['libraries'] + +// existingLibs.forEach { key, value -> +// println "${key} = ${value}" +// } + +def finalVersions = [:] +def finalLibraries = [:] + +existingVersions.each { key, value -> + finalVersions[key] = value + if(versionsCatalog.containsKey(key)) { + assert value == versionsCatalog[key] + versionsCatalog.remove(key) + } +} +finalVersions.putAll(versionsCatalog) + + +println "\n\n[versions]" +finalVersions.sort().each { key, value -> + println "${key} = \"${value}\"" +} + +def existingLibs = parsedToml['libraries'] +existingLibs.each { key, value -> + finalLibraries[key] = value + if(librariesCatalog[key] != null) { + def newValue = librariesCatalog[key] + assert value == "{ group = \"${newValue['group']}\", name = \"${newValue['name']}\", version.ref = \"${newValue['version']}\" }" + librariesCatalog.remove(key) + } +} +finalLibraries.putAll(librariesCatalog) + +println "\n\n[libraries]" +finalLibraries.sort().each { key, value -> + 
if(value instanceof Map) { + println "${key} = { group = \"${value['group']}\", name = \"${value['name']}\", version.ref = \"${value['version']}\" }" + } else if (value.startsWith("{")) { + println "${key} = $value" + } else { + println "${key} = \"$value\"" + } +} + +// println "Title: ${parsedToml['title']}" +// println "Owner Name: ${parsedToml['versions']['name']}" +// println "Database Server: ${parsedToml['database']['server']}" +// println "Database Ports: ${parsedToml['database']['ports']}" + +} + +main() \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index e6441136f3d4ba8a0da8d277868979cfbc8ad796..2c3521197d7c4586c843d1d3e9090525f1898cde 100644 GIT binary patch delta 8703 zcmYLtRag{&)-BQ@Dc#cDDP2Q%r*wBHJ*0FE-92)X$3_b$L+F2Fa28UVeg>}yRjC}^a^+(Cdu_FTlV;w_x7ig{yd(NYi_;SHXEq`|Qa`qPMf1B~v#%<*D zn+KWJfX#=$FMopqZ>Cv7|0WiA^M(L@tZ=_Hi z*{?)#Cn^{TIzYD|H>J3dyXQCNy8f@~OAUfR*Y@C6r=~KMZ{X}q`t@Er8NRiCUcR=?Y+RMv`o0i{krhWT6XgmUt!&X=e_Q2=u@F=PXKpr9-FL@0 zfKigQcGHyPn{3vStLFk=`h@+Lh1XBNC-_nwNU{ytxZF$o}oyVfHMj|ZHWmEmZeNIlO5eLco<=RI&3=fYK*=kmv*75aqE~&GtAp(VJ z`VN#&v2&}|)s~*yQ)-V2@RmCG8lz5Ysu&I_N*G5njY`<@HOc*Bj)ZwC%2|2O<%W;M z+T{{_bHLh~n(rM|8SpGi8Whep9(cURNRVfCBQQ2VG<6*L$CkvquqJ~9WZ~!<6-EZ&L(TN zpSEGXrDiZNz)`CzG>5&_bxzBlXBVs|RTTQi5GX6s5^)a3{6l)Wzpnc|Cc~(5mO)6; z6gVO2Zf)srRQ&BSeg0)P2en#<)X30qXB{sujc3Ppm4*)}zOa)@YZ<%1oV9K%+(VzJ zk(|p>q-$v>lImtsB)`Mm;Z0LaU;4T1BX!wbnu-PSlH1%`)jZZJ(uvbmM^is*r=Y{B zI?(l;2n)Nx!goxrWfUnZ?y5$=*mVU$Lpc_vS2UyW>tD%i&YYXvcr1v7hL2zWkHf42 z_8q$Gvl>%468i#uV`RoLgrO+R1>xP8I^7~&3(=c-Z-#I`VDnL`6stnsRlYL zJNiI`4J_0fppF<(Ot3o2w?UT*8QQrk1{#n;FW@4M7kR}oW-}k6KNQaGPTs=$5{Oz} zUj0qo@;PTg#5moUF`+?5qBZ)<%-$qw(Z?_amW*X}KW4j*FmblWo@SiU16V>;nm`Eg zE0MjvGKN_eA%R0X&RDT!hSVkLbF`BFf;{8Nym#1?#5Fb?bAHY(?me2tww}5K9AV9y+T7YaqaVx8n{d=K`dxS|=))*KJn(~8u@^J% zj;8EM+=Dq^`HL~VPag9poTmeP$E`npJFh^|=}Mxs2El)bOyoimzw8(RQle(f$n#*v zzzG@VOO(xXiG8d?gcsp-Trn-36}+S^w$U(IaP`-5*OrmjB%Ozzd;jfaeRHAzc_#?- z`0&PVZANQIcb1sS_JNA2TFyN$*yFSvmZbqrRhfME3(PJ62u%KDeJ$ZeLYuiQMC2Sc z35+Vxg^@gSR6flp>mS|$p&IS7#fL@n20YbNE9(fH;n%C{w?Y0=N5?3GnQLIJLu{lm zV6h@UDB+23dQoS>>)p`xYe^IvcXD*6nDsR;xo?1aNTCMdbZ{uyF^zMyloFDiS~P7W>WuaH2+`xp0`!d_@>Fn<2GMt z&UTBc5QlWv1)K5CoShN@|0y1M?_^8$Y*U(9VrroVq6NwAJe zxxiTWHnD#cN0kEds(wN8YGEjK&5%|1pjwMH*81r^aXR*$qf~WiD2%J^=PHDUl|=+f zkB=@_7{K$Fo0%-WmFN_pyXBxl^+lLG+m8Bk1OxtFU}$fQU8gTYCK2hOC0sVEPCb5S z4jI07>MWhA%cA{R2M7O_ltorFkJ-BbmPc`{g&Keq!IvDeg8s^PI3a^FcF z@gZ2SB8$BPfenkFc*x#6&Z;7A5#mOR5qtgE}hjZ)b!MkOQ zEqmM3s>cI_v>MzM<2>U*eHoC69t`W`^9QBU^F$ z;nU4%0$)$ILukM6$6U+Xts8FhOFb|>J-*fOLsqVfB=vC0v2U&q8kYy~x@xKXS*b6i zy=HxwsDz%)!*T5Bj3DY1r`#@Tc%LKv`?V|g6Qv~iAnrqS+48TfuhmM)V_$F8#CJ1j4;L}TBZM~PX!88IT+lSza{BY#ER3TpyMqi# z#{nTi!IsLYt9cH?*y^bxWw4djrd!#)YaG3|3>|^1mzTuXW6SV4+X8sA2dUWcjH)a3 z&rXUMHbOO?Vcdf3H<_T-=DB0M4wsB;EL3lx?|T(}@)`*C5m`H%le54I{bfg7GHqYB z9p+30u+QXMt4z&iG%LSOk1uw7KqC2}ogMEFzc{;5x`hU(rh0%SvFCBQe}M#RSWJv;`KM zf7D&z0a)3285{R$ZW%+I@JFa^oZN)vx77y_;@p0(-gz6HEE!w&b}>0b)mqz-(lfh4 zGt}~Hl@{P63b#dc`trFkguB}6Flu!S;w7lp_>yt|3U=c|@>N~mMK_t#LO{n;_wp%E zQUm=z6?JMkuQHJ!1JV$gq)q)zeBg)g7yCrP=3ZA|wt9%_l#yPjsS#C7qngav8etSX+s?JJ1eX-n-%WvP!IH1%o9j!QH zeP<8aW}@S2w|qQ`=YNC}+hN+lxv-Wh1lMh?Y;LbIHDZqVvW^r;^i1O<9e z%)ukq=r=Sd{AKp;kj?YUpRcCr*6)<@Mnp-cx{rPayiJ0!7Jng}27Xl93WgthgVEn2 zQlvj!%Q#V#j#gRWx7((Y>;cC;AVbPoX*mhbqK*QnDQQ?qH+Q*$u6_2QISr!Fn;B-F@!E+`S9?+Jr zt`)cc(ZJ$9q^rFohZJoRbP&X3)sw9CLh#-?;TD}!i>`a;FkY6(1N8U-T;F#dGE&VI zm<*Tn>EGW(TioP@hqBg zn6nEolK5(}I*c;XjG!hcI0R=WPzT)auX-g4Znr;P`GfMa*!!KLiiTqOE*STX4C(PD z&}1K|kY#>~>sx6I0;0mUn8)=lV?o#Bcn3tn|M*AQ$FscYD$0H(UKzC0R588Mi}sFl 
z@hG4h^*;_;PVW#KW=?>N)4?&PJF&EO(X?BKOT)OCi+Iw)B$^uE)H>KQZ54R8_2z2_ z%d-F7nY_WQiSB5vWd0+>^;G^j{1A%-B359C(Eji{4oLT9wJ~80H`6oKa&{G- z)2n-~d8S0PIkTW_*Cu~nwVlE&Zd{?7QbsGKmwETa=m*RG>g??WkZ|_WH7q@ zfaxzTsOY2B3!Fu;rBIJ~aW^yqn{V;~4LS$xA zGHP@f>X^FPnSOxEbrnEOd*W7{c(c`b;RlOEQ*x!*Ek<^p*C#8L=Ty^S&hg zaV)g8<@!3p6(@zW$n7O8H$Zej+%gf^)WYc$WT{zp<8hmn!PR&#MMOLm^hcL2;$o=Q zXJ=9_0vO)ZpNxPjYs$nukEGK2bbL%kc2|o|zxYMqK8F?$YtXk9Owx&^tf`VvCCgUz zLNmDWtociY`(}KqT~qnVUkflu#9iVqXw7Qi7}YT@{K2Uk(Wx7Q-L}u^h+M(81;I*J ze^vW&-D&=aOQq0lF5nLd)OxY&duq#IdK?-r7En0MnL~W51UXJQFVVTgSl#85=q$+| zHI%I(T3G8ci9Ubq4(snkbQ*L&ksLCnX_I(xa1`&(Bp)|fW$kFot17I)jyIi06dDTTiI%gNR z8i*FpB0y0 zjzWln{UG1qk!{DEE5?0R5jsNkJ(IbGMjgeeNL4I9;cP&>qm%q7cHT}@l0v;TrsuY0 zUg;Z53O-rR*W!{Q*Gp26h`zJ^p&FmF0!EEt@R3aT4YFR0&uI%ko6U0jzEYk_xScP@ zyk%nw`+Ic4)gm4xvCS$)y;^)B9^}O0wYFEPas)!=ijoBCbF0DbVMP z`QI7N8;88x{*g=51AfHx+*hoW3hK(?kr(xVtKE&F-%Tb}Iz1Z8FW>usLnoCwr$iWv ztOVMNMV27l*fFE29x}veeYCJ&TUVuxsd`hV-8*SxX@UD6au5NDhCQ4Qs{{CJQHE#4 z#bg6dIGO2oUZQVY0iL1(Q>%-5)<7rhnenUjOV53*9Qq?aU$exS6>;BJqz2|#{We_| zX;Nsg$KS<+`*5=WA?idE6G~kF9oQPSSAs#Mh-|)@kh#pPCgp&?&=H@Xfnz`5G2(95 z`Gx2RfBV~`&Eyq2S9m1}T~LI6q*#xC^o*EeZ#`}Uw)@RD>~<_Kvgt2?bRbO&H3&h- zjB&3bBuWs|YZSkmcZvX|GJ5u7#PAF$wj0ULv;~$7a?_R%e%ST{al;=nqj-<0pZiEgNznHM;TVjCy5E#4f?hudTr0W8)a6o;H; zhnh6iNyI^F-l_Jz$F`!KZFTG$yWdioL=AhImGr!$AJihd{j(YwqVmqxMKlqFj<_Hlj@~4nmrd~&6#f~9>r2_e-^nca(nucjf z;(VFfBrd0?k--U9L*iey5GTc|Msnn6prtF*!5AW3_BZ9KRO2(q7mmJZ5kz-yms`04e; z=uvr2o^{lVBnAkB_~7b7?1#rDUh4>LI$CH1&QdEFN4J%Bz6I$1lFZjDz?dGjmNYlD zDt}f;+xn-iHYk~V-7Fx!EkS``+w`-f&Ow>**}c5I*^1tpFdJk>vG23PKw}FrW4J#x zBm1zcp^){Bf}M|l+0UjvJXRjP3~!#`I%q*E=>?HLZ>AvB5$;cqwSf_*jzEmxxscH; zcl>V3s>*IpK`Kz1vP#APs#|tV9~#yMnCm&FOllccilcNmAwFdaaY7GKg&(AKG3KFj zk@%9hYvfMO;Vvo#%8&H_OO~XHlwKd()gD36!_;o z*7pl*o>x9fbe?jaGUO25ZZ@#qqn@|$B+q49TvTQnasc$oy`i~*o}Ka*>Wg4csQOZR z|Fs_6-04vj-Dl|B2y{&mf!JlPJBf3qG~lY=a*I7SBno8rLRdid7*Kl@sG|JLCt60# zqMJ^1u^Gsb&pBPXh8m1@4;)}mx}m%P6V8$1oK?|tAk5V6yyd@Ez}AlRPGcz_b!c;; z%(uLm1Cp=NT(4Hcbk;m`oSeW5&c^lybx8+nAn&fT(!HOi@^&l1lDci*?L#*J7-u}} z%`-*V&`F1;4fWsvcHOlZF#SD&j+I-P(Mu$L;|2IjK*aGG3QXmN$e}7IIRko8{`0h9 z7JC2vi2Nm>g`D;QeN@^AhC0hKnvL(>GUqs|X8UD1r3iUc+-R4$=!U!y+?p6rHD@TL zI!&;6+LK_E*REZ2V`IeFP;qyS*&-EOu)3%3Q2Hw19hpM$3>v!!YABs?mG44{L=@rjD%X-%$ajTW7%t_$7to%9d3 z8>lk z?_e}(m&>emlIx3%7{ER?KOVXi>MG_)cDK}v3skwd%Vqn0WaKa1;e=bK$~Jy}p#~`B zGk-XGN9v)YX)K2FM{HNY-{mloSX|a?> z8Om9viiwL|vbVF~j%~hr;|1wlC0`PUGXdK12w;5Wubw}miQZ)nUguh?7asm90n>q= z;+x?3haT5#62bg^_?VozZ-=|h2NbG%+-pJ?CY(wdMiJ6!0ma2x{R{!ys=%in;;5@v z{-rpytg){PNbCGP4Ig>=nJV#^ie|N68J4D;C<1=$6&boh&ol~#A?F-{9sBL*1rlZshXm~6EvG!X9S zD5O{ZC{EEpHvmD5K}ck+3$E~{xrrg*ITiA}@ZCoIm`%kVqaX$|#ddV$bxA{jux^uRHkH)o6#}fT6XE|2BzU zJiNOAqcxdcQdrD=U7OVqer@p>30l|ke$8h;Mny-+PP&OM&AN z9)!bENg5Mr2g+GDIMyzQpS1RHE6ow;O*ye;(Qqej%JC?!D`u;<;Y}1qi5cL&jm6d9 za{plRJ0i|4?Q%(t)l_6f8An9e2<)bL3eULUVdWanGSP9mm?PqFbyOeeSs9{qLEO-) zTeH*<$kRyrHPr*li6p+K!HUCf$OQIqwIw^R#mTN>@bm^E=H=Ger_E=ztfGV9xTgh=}Hep!i97A;IMEC9nb5DBA5J#a8H_Daq~ z6^lZ=VT)7=y}H3=gm5&j!Q79#e%J>w(L?xBcj_RNj44r*6^~nCZZYtCrLG#Njm$$E z7wP?E?@mdLN~xyWosgwkCot8bEY-rUJLDo7gukwm@;TjXeQ>fr(wKP%7LnH4Xsv?o zUh6ta5qPx8a5)WO4 zK37@GE@?tG{!2_CGeq}M8VW(gU6QXSfadNDhZEZ}W2dwm)>Y7V1G^IaRI9ugWCP#sw1tPtU|13R!nwd1;Zw8VMx4hUJECJkocrIMbJI zS9k2|`0$SD%;g_d0cmE7^MXP_;_6`APcj1yOy_NXU22taG9Z;C2=Z1|?|5c^E}dR& zRfK2Eo=Y=sHm@O1`62ciS1iKv9BX=_l7PO9VUkWS7xlqo<@OxlR*tn$_WbrR8F?ha zBQ4Y!is^AIsq-46^uh;=9B`gE#Sh+4m>o@RMZFHHi=qb7QcUrgTos$e z^4-0Z?q<7XfCP~d#*7?hwdj%LyPj2}bsdWL6HctL)@!tU$ftMmV=miEvZ2KCJXP%q zLMG&%rVu8HaaM-tn4abcSE$88EYmK|5%_29B*L9NyO|~j3m>YGXf6fQL$(7>Bm9o 
zjHfJ+lmYu_`+}xUa^&i81%9UGQ6t|LV45I)^+m@Lz@jEeF;?_*y>-JbK`=ZVsSEWZ z$p^SK_v(0d02AyIv$}*8m)9kjef1-%H*_daPdSXD6mpc>TW`R$h9On=Z9n>+f4swL zBz^(d9uaQ_J&hjDvEP{&6pNz-bg;A===!Ac%}bu^>0}E)wdH1nc}?W*q^J2SX_A*d zBLF@n+=flfH96zs@2RlOz&;vJPiG6In>$&{D+`DNgzPYVu8<(N&0yPt?G|>D6COM# zVd)6v$i-VtYfYi1h)pXvO}8KO#wuF=F^WJXPC+;hqpv>{Z+FZTP1w&KaPl?D)*A=( z8$S{Fh;Ww&GqSvia6|MvKJg-RpNL<6MXTl(>1}XFfziRvPaLDT1y_tjLYSGS$N;8| zZC*Hcp!~u?v~ty3&dBm`1A&kUe6@`q!#>P>ZZZgGRYhNIxFU6B>@f@YL%hOV0=9s# z?@0~aR1|d9LFoSI+li~@?g({Y0_{~~E_MycHTXz`EZmR2$J$3QVoA25j$9pe?Ub)d z`jbm8v&V0JVfY-^1mG=a`70a_tjafgi}z-8$smw7Mc`-!*6y{rB-xN1l`G3PLBGk~ z{o(KCV0HEfj*rMAiluQuIZ1tevmU@m{adQQr3xgS!e_WXw&eE?GjlS+tL0@x%Hm{1 zzUF^qF*2KAxY0$~pzVRpg9dA*)^ z7&wu-V$7+Jgb<5g;U1z*ymus?oZi7&gr!_3zEttV`=5VlLtf!e&~zv~PdspA0JCRz zZi|bO5d)>E;q)?}OADAhGgey#6(>+36XVThP%b#8%|a9B_H^)Nps1md_lVv5~OO@(*IJO@;eqE@@(y}KA- z`zj@%6q#>hIgm9}*-)n(^Xbdp8`>w~3JCC`(H{NUh8Umm{NUntE+eMg^WvSyL+ilV zff54-b59jg&r_*;*#P~ON#I=gAW99hTD;}nh_j;)B6*tMgP_gz4?=2EJZg$8IU;Ly<(TTC?^)& zj@%V!4?DU&tE=8)BX6f~x0K+w$%=M3;Fpq$VhETRlJ8LEEe;aUcG;nBe|2Gw>+h7CuJ-^gYFhQzDg(`e=!2f7t0AXrl zAx`RQ1u1+}?EkEWSb|jQN)~wOg#Ss&1oHoFBvg{Z|4#g$)mNzjKLq+8rLR(jC(QUC Ojj7^59?Sdh$^Qpp*~F>< delta 8662 zcmYM1RaBhK(uL9BL4pT&ch}$qcL*As0R|^HFD`?-26qkaNwC3nu;A|Q0Yd)oJ7=x) z_f6HatE;=#>YLq{FoYf$!na@pfNwSyI%>|UMk5`vO(z@Ao)eZR(~D#FF?U$)+q)1q z9OVG^Ib0v?R8wYfQ*1H;5Oyixqnyt6cXR#u=LM~V7_GUu}N(b}1+x^JUL#_8Xj zB*(FInWvSPGo;K=k3}p&4`*)~)p`nX#}W&EpfKCcOf^7t zPUS81ov(mXS;$9To6q84I!tlP&+Z?lkctuIZ(SHN#^=JGZe^hr^(3d*40pYsjikBWME6IFf!!+kC*TBc!T)^&aJ#z0#4?OCUbNoa}pwh=_SFfMf|x$`-5~ zP%%u%QdWp#zY6PZUR8Mz1n$f44EpTEvKLTL;yiZrPCV=XEL09@qmQV#*Uu*$#-WMN zZ?rc(7}93z4iC~XHcatJev=ey*hnEzajfb|22BpwJ4jDi;m>Av|B?TqzdRm-YT(EV zCgl${%#nvi?ayAFYV7D_s#07}v&FI43BZz@`dRogK!k7Y!y6r=fvm~=F9QP{QTj>x z#Y)*j%`OZ~;rqP0L5@qYhR`qzh^)4JtE;*faTsB;dNHyGMT+fpyz~LDaMOO?c|6FD z{DYA+kzI4`aD;Ms|~h49UAvOfhMEFip&@&Tz>3O+MpC0s>`fl!T(;ZP*;Ux zr<2S-wo(Kq&wfD_Xn7XXQJ0E4u7GcC6pqe`3$fYZ5Eq4`H67T6lex_QP>Ca##n2zx z!tc=_Ukzf{p1%zUUkEO(0r~B=o5IoP1@#0A=uP{g6WnPnX&!1Z$UWjkc^~o^y^Kkn z%zCrr^*BPjcTA58ZR}?%q7A_<=d&<*mXpFSQU%eiOR`=78@}+8*X##KFb)r^zyfOTxvA@cbo65VbwoK0lAj3x8X)U5*w3(}5 z(Qfv5jl{^hk~j-n&J;kaK;fNhy9ZBYxrKQNCY4oevotO-|7X}r{fvYN+{sCFn2(40 zvCF7f_OdX*L`GrSf0U$C+I@>%+|wQv*}n2yT&ky;-`(%#^vF79p1 z>y`59E$f7!vGT}d)g)n}%T#-Wfm-DlGU6CX`>!y8#tm-Nc}uH50tG)dab*IVrt-TTEM8!)gIILu*PG_-fbnFjRA+LLd|_U3yas12Lro%>NEeG%IwN z{FWomsT{DqMjq{7l6ZECb1Hm@GQ`h=dcyApkoJ6CpK3n83o-YJnXxT9b2%TmBfKZ* zi~%`pvZ*;(I%lJEt9Bphs+j#)ws}IaxQYV6 zWBgVu#Kna>sJe;dBQ1?AO#AHecU~3cMCVD&G})JMkbkF80a?(~1HF_wv6X!p z6uXt_8u)`+*%^c@#)K27b&Aa%m>rXOcGQg8o^OB4t0}@-WWy38&)3vXd_4_t%F1|( z{z(S)>S!9eUCFA$fQ^127DonBeq@5FF|IR7(tZ?Nrx0(^{w#a$-(fbjhN$$(fQA(~|$wMG4 z?UjfpyON`6n#lVwcKQ+#CuAQm^nmQ!sSk>=Mdxk9e@SgE(L2&v`gCXv&8ezHHn*@% zi6qeD|I%Q@gb(?CYus&VD3EE#xfELUvni89Opq-6fQmY-9Di3jxF?i#O)R4t66ekw z)OW*IN7#{_qhrb?qlVwmM@)50jEGbjTiDB;nX{}%IC~pw{ev#!1`i6@xr$mgXX>j} zqgxKRY$fi?B7|GHArqvLWu;`?pvPr!m&N=F1<@i-kzAmZ69Sqp;$)kKg7`76GVBo{ zk+r?sgl{1)i6Hg2Hj!ehsDF3tp(@n2+l%ihOc7D~`vzgx=iVU0{tQ&qaV#PgmalfG zPj_JimuEvo^1X)dGYNrTHBXwTe@2XH-bcnfpDh$i?Il9r%l$Ob2!dqEL-To>;3O>` z@8%M*(1#g3_ITfp`z4~Z7G7ZG>~F0W^byMvwzfEf*59oM*g1H)8@2zL&da+$ms$Dp zrPZ&Uq?X)yKm7{YA;mX|rMEK@;W zA-SADGLvgp+)f01=S-d$Z8XfvEZk$amHe}B(gQX-g>(Y?IA6YJfZM(lWrf);5L zEjq1_5qO6U7oPSb>3|&z>OZ13;mVT zWCZ=CeIEK~6PUv_wqjl)pXMy3_46hB?AtR7_74~bUS=I}2O2CjdFDA*{749vOj2hJ z{kYM4fd`;NHTYQ_1Rk2dc;J&F2ex^}^%0kleFbM!yhwO|J^~w*CygBbkvHnzz@a~D z|60RVTr$AEa-5Z->qEMEfau=__2RanCTKQ{XzbhD{c!e5hz&$ZvhBX0(l84W%eW17 zQ!H)JKxP$wTOyq83^qmx1Qs;VuWuxclIp!BegkNYiwyMVBay@XWlTpPCzNn>&4)f* 
zm&*aS?T?;6?2>T~+!=Gq4fjP1Z!)+S<xiG>XqzY@WKKMzx?0|GTS4{ z+z&e0Uysciw#Hg%)mQ3C#WQkMcm{1yt(*)y|yao2R_FRX$WPvg-*NPoj%(k*{BA8Xx&0HEqT zI0Swyc#QyEeUc)0CC}x{p+J{WN>Z|+VZWDpzW`bZ2d7^Yc4ev~9u-K&nR zl#B0^5%-V4c~)1_xrH=dGbbYf*7)D&yy-}^V|Np|>V@#GOm($1=El5zV?Z`Z__tD5 zcLUi?-0^jKbZrbEny&VD!zA0Nk3L|~Kt4z;B43v@k~ zFwNisc~D*ZROFH;!f{&~&Pof-x8VG8{gSm9-Yg$G(Q@O5!A!{iQH0j z80Rs>Ket|`cbw>z$P@Gfxp#wwu;I6vi5~7GqtE4t7$Hz zPD=W|mg%;0+r~6)dC>MJ&!T$Dxq3 zU@UK_HHc`_nI5;jh!vi9NPx*#{~{$5Azx`_VtJGT49vB_=WN`*i#{^X`xu$9P@m>Z zL|oZ5CT=Zk?SMj{^NA5E)FqA9q88h{@E96;&tVv^+;R$K`kbB_ zZneKrSN+IeIrMq;4EcH>sT2~3B zrZf-vSJfekcY4A%e2nVzK8C5~rAaP%dV2Hwl~?W87Hdo<*EnDcbZqVUb#8lz$HE@y z2DN2AQh%OcqiuWRzRE>cKd)24PCc)#@o&VCo!Rcs;5u9prhK}!->CC)H1Sn-3C7m9 zyUeD#Udh1t_OYkIMAUrGU>ccTJS0tV9tW;^-6h$HtTbon@GL1&OukJvgz>OdY)x4D zg1m6Y@-|p;nB;bZ_O>_j&{BmuW9km4a728vJV5R0nO7wt*h6sy7QOT0ny-~cWTCZ3 z9EYG^5RaAbLwJ&~d(^PAiicJJs&ECAr&C6jQcy#L{JCK&anL)GVLK?L3a zYnsS$+P>UB?(QU7EI^%#9C;R-jqb;XWX2Bx5C;Uu#n9WGE<5U=zhekru(St>|FH2$ zOG*+Tky6R9l-yVPJk7giGulOO$gS_c!DyCog5PT`Sl@P!pHarmf7Y0HRyg$X@fB7F zaQy&vnM1KZe}sHuLY5u7?_;q!>mza}J?&eLLpx2o4q8$qY+G2&Xz6P8*fnLU+g&i2}$F%6R_Vd;k)U{HBg{+uuKUAo^*FRg!#z}BajS)OnqwXd!{u>Y&aH?)z%bwu_NB9zNw+~661!> zD3%1qX2{743H1G8d~`V=W`w7xk?bWgut-gyAl*6{dW=g_lU*m?fJ>h2#0_+J3EMz_ zR9r+0j4V*k>HU`BJaGd~@*G|3Yp?~Ljpth@!_T_?{an>URYtict~N+wb}%n)^GE8eM(=NqLnn*KJnE*v(7Oo)NmKB*qk;0&FbO zkrIQs&-)ln0-j~MIt__0pLdrcBH{C(62`3GvGjR?`dtTdX#tf-2qkGbeV;Ud6Dp0& z|A6-DPgg=v*%2`L4M&p|&*;;I`=Tn1M^&oER=Gp&KHBRxu_OuFGgX;-U8F?*2>PXjb!wwMMh_*N8$?L4(RdvV#O5cUu0F|_zQ#w1zMA4* zJeRk}$V4?zPVMB=^}N7x?(P7!x6BfI%*)yaUoZS0)|$bw07XN{NygpgroPW>?VcO} z@er3&#@R2pLVwkpg$X8HJM@>FT{4^Wi&6fr#DI$5{ERpM@|+60{o2_*a7k__tIvGJ9D|NPoX@$4?i_dQPFkx0^f$=#_)-hphQ93a0|`uaufR!Nlc^AP+hFWe~(j_DCZmv;7CJ4L7tWk{b;IFDvT zchD1qB=cE)Mywg5Nw>`-k#NQhT`_X^c`s$ODVZZ-)T}vgYM3*syn41}I*rz?)`Q<* zs-^C3!9AsV-nX^0wH;GT)Y$yQC*0x3o!Bl<%>h-o$6UEG?{g1ip>njUYQ}DeIw0@qnqJyo0do(`OyE4kqE2stOFNos%!diRfe=M zeU@=V=3$1dGv5ZbX!llJ!TnRQQe6?t5o|Y&qReNOxhkEa{CE6d^UtmF@OXk<_qkc0 zc+ckH8Knc!FTjk&5FEQ}$sxj!(a4223cII&iai-nY~2`|K89YKcrYFAMo^oIh@W^; zsb{KOy?dv_D5%}zPk_7^I!C2YsrfyNBUw_ude7XDc0-+LjC0!X_moHU3wmveS@GRu zX>)G}L_j1I-_5B|b&|{ExH~;Nm!xytCyc}Ed!&Hqg;=qTK7C93f>!m3n!S5Z!m`N} zjIcDWm8ES~V2^dKuv>8@Eu)Zi{A4;qHvTW7hB6B38h%$K76BYwC3DIQ0a;2fSQvo$ z`Q?BEYF1`@I-Nr6z{@>`ty~mFC|XR`HSg(HN>&-#&eoDw-Q1g;x@Bc$@sW{Q5H&R_ z5Aici44Jq-tbGnDsu0WVM(RZ=s;CIcIq?73**v!Y^jvz7ckw*=?0=B!{I?f{68@V( z4dIgOUYbLOiQccu$X4P87wZC^IbGnB5lLfFkBzLC3hRD?q4_^%@O5G*WbD?Wug6{<|N#Fv_Zf3ST>+v_!q5!fSy#{_XVq$;k*?Ar^R&FuFM7 zKYiLaSe>Cw@`=IUMZ*U#v>o5!iZ7S|rUy2(yG+AGnauj{;z=s8KQ(CdwZ>&?Z^&Bt z+74(G;BD!N^Ke>(-wwZN5~K%P#L)59`a;zSnRa>2dCzMEz`?VaHaTC>?&o|(d6e*Z zbD!=Ua-u6T6O!gQnncZ&699BJyAg9mKXd_WO8O`N@}bx%BSq)|jgrySfnFvzOj!44 z9ci@}2V3!ag8@ZbJO;;Q5ivdTWx+TGR`?75Jcje}*ufx@%5MFUsfsi%FoEx)&uzkN zgaGFOV!s@Hw3M%pq5`)M4Nz$)~Sr9$V2rkP?B7kvI7VAcnp6iZl zOd!(TNw+UH49iHWC4!W&9;ZuB+&*@Z$}>0fx8~6J@d)fR)WG1UndfdVEeKW=HAur| z15zG-6mf`wyn&x@&?@g1ibkIMob_`x7nh7yu9M>@x~pln>!_kzsLAY#2ng0QEcj)qKGj8PdWEuYKdM!jd{ zHP6j^`1g}5=C%)LX&^kpe=)X+KR4VRNli?R2KgYlwKCN9lcw8GpWMV+1Ku)~W^jV2 zyiTv-b*?$AhvU7j9~S5+u`Ysw9&5oo0Djp8e(j25Etbx42Qa=4T~}q+PG&XdkWDNF z7bqo#7KW&%dh~ST6hbu8S=0V`{X&`kAy@8jZWZJuYE}_#b4<-^4dNUc-+%6g($yN% z5ny^;ogGh}H5+Gq3jR21rQgy@5#TCgX+(28NZ4w}dzfx-LP%uYk9LPTKABaQh1ah) z@Y(g!cLd!Mcz+e|XI@@IH9z*2=zxJ0uaJ+S(iIsk7=d>A#L<}={n`~O?UTGX{8Pda z_KhI*4jI?b{A!?~-M$xk)w0QBJb7I=EGy&o3AEB_RloU;v~F8ubD@9BbxV1c36CsTX+wzAZlvUm*;Re06D+Bq~LYg-qF4L z5kZZ80PB&4U?|hL9nIZm%jVj0;P_lXar)NSt3u8xx!K6Y0bclZ%<9fwjZ&!^;!>ug zQ}M`>k@S{BR20cyVXtKK%Qa^7?e<%VSAPGmVtGo6zc6BkO5vW5)m8_k{xT3;ocdpH 
zudHGT06XU@y6U!&kP8i6ubMQl>cm7=(W6P7^24Uzu4Xpwc->ib?RSHL*?!d{c-aE# zp?TrFr{4iDL3dpljl#HHbEn{~eW2Nqfksa(r-}n)lJLI%e#Bu|+1% zN&!n(nv(3^jGx?onfDcyeCC*p6)DuFn_<*62b92Pn$LH(INE{z^8y?mEvvO zZ~2I;A2qXvuj>1kk@WsECq1WbsSC!0m8n=S^t3kxAx~of0vpv{EqmAmDJ3(o;-cvf zu$33Z)C0)Y4(iBhh@)lsS|a%{;*W(@DbID^$ z|FzcJB-RFzpkBLaFLQ;EWMAW#@K(D#oYoOmcctdTV?fzM2@6U&S#+S$&zA4t<^-!V z+&#*xa)cLnfMTVE&I}o#4kxP~JT3-A)L_5O!yA2ebq?zvb0WO1D6$r9p?!L0#)Fc> z+I&?aog~FPBH}BpWfW^pyc{2i8#Io6e)^6wv}MZn&`01oq@$M@5eJ6J^IrXLI) z4C!#kh)89u5*Q@W5(rYDqBKO6&G*kPGFZfu@J}ug^7!sC(Wcv3Fbe{$Sy|{-VXTct znsP+0v}kduRs=S=x0MA$*(7xZPE-%aIt^^JG9s}8$43E~^t4=MxmMts;q2$^sj=k( z#^suR{0Wl3#9KAI<=SC6hifXuA{o02vdyq>iw%(#tv+@ov{QZBI^*^1K?Q_QQqA5n9YLRwO3a7JR+1x3#d3lZL;R1@8Z!2hnWj^_5 z^M{3wg%f15Db5Pd>tS!6Hj~n^l478ljxe@>!C;L$%rKfm#RBw^_K&i~ZyY_$BC%-L z^NdD{thVHFlnwfy(a?{%!m;U_9ic*!OPxf&5$muWz7&4VbW{PP)oE5u$uXUZU>+8R zCsZ~_*HLVnBm*^{seTAV=iN)mB0{<}C!EgE$_1RMj1kGUU?cjSWu*|zFA(ZrNE(CkY7>Mv1C)E1WjsBKAE%w}{~apwNj z0h`k)C1$TwZ<3de9+>;v6A0eZ@xHm#^7|z9`gQ3<`+lpz(1(RsgHAM@Ja+)c?;#j- zC=&5FD)m@9AX}0g9XQ_Yt4YB}aT`XxM-t>7v@BV}2^0gu0zRH%S9}!P(MBAFGyJ8F zEMdB&{eGOd$RqV77Lx>8pX^<@TdL{6^K7p$0uMTLC^n)g*yXRXMy`tqjYIZ|3b#Iv z4<)jtQU5`b{A;r2QCqIy>@!uuj^TBed3OuO1>My{GQe<^9|$4NOHTKFp{GpdFY-kC zi?uHq>lF$}<(JbQatP0*>$Aw_lygfmUyojkE=PnV)zc)7%^5BxpjkU+>ol2}WpB2hlDP(hVA;uLdu`=M_A!%RaRTd6>Mi_ozLYOEh!dfT_h0dSsnQm1bk)%K45)xLw zql&fx?ZOMBLXtUd$PRlqpo2CxNQTBb=!T|_>p&k1F})Hq&xksq>o#4b+KSs2KyxPQ z#{(qj@)9r6u2O~IqHG76@Fb~BZ4Wz_J$p_NU9-b3V$$kzjN24*sdw5spXetOuU1SR z{v}b92c>^PmvPs>BK2Ylp6&1>tnPsBA0jg0RQ{({-?^SBBm>=W>tS?_h^6%Scc)8L zgsKjSU@@6kSFX%_3%Qe{i7Z9Wg7~fM_)v?ExpM@htI{G6Db5ak(B4~4kRghRp_7zr z#Pco0_(bD$IS6l2j>%Iv^Hc)M`n-vIu;-2T+6nhW0JZxZ|NfDEh;ZnAe d|9e8rKfIInFTYPwOD9TMuEcqhmizAn{|ERF)u#Xe diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 515ab9d5f1822..efe2ff3449216 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=f8b4f4772d302c8ff580bc40d0f56e715de69b163546944f787c87abf209c961 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.8-all.zip +distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/gradlew b/gradlew index b740cf13397ab..f5feea6d6b116 100755 --- a/gradlew +++ b/gradlew @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# ############################################################################## # @@ -84,7 +86,8 @@ done # shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} # Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) -APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s +' "$PWD" ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum diff --git a/gradlew.bat b/gradlew.bat index 7101f8e4676fc..9b42019c7915b 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -13,6 +13,8 @@ @rem See the License for the specific language governing permissions and @rem limitations under the License. 
@rem +@rem SPDX-License-Identifier: Apache-2.0 +@rem @if "%DEBUG%"=="" @echo off @rem ########################################################################## diff --git a/jdks.log b/jdks.log new file mode 100644 index 0000000000000..1354bcbe6c1b4 --- /dev/null +++ b/jdks.log @@ -0,0 +1,3146 @@ +{ + "service": "elastic_jvm_service", + "valid_locations": [ + "/manifest.json", + "/jdks", + "/jdk/adoptiumjdk-11.0.12+7-darwin", + "/jdk/adoptiumjdk-11.0.12+7-linux", + "/jdk/adoptiumjdk-11.0.12+7-linux-aarch64", + "/jdk/adoptiumjdk-11.0.12+7-windows", + "/jdk/adoptiumjdk-11.0.12+7-windows-x86_32", + "/jdk/adoptiumjdk-11.0.13+8-darwin", + "/jdk/adoptiumjdk-11.0.13+8-linux", + "/jdk/adoptiumjdk-11.0.13+8-linux-aarch64", + "/jdk/adoptiumjdk-11.0.13+8-windows", + "/jdk/adoptiumjdk-11.0.13+8-windows-x86_32", + "/jdk/adoptiumjdk-11.0.14+9-darwin", + "/jdk/adoptiumjdk-11.0.14+9-linux", + "/jdk/adoptiumjdk-11.0.14+9-linux-aarch64", + "/jdk/adoptiumjdk-11.0.14+9-windows", + "/jdk/adoptiumjdk-11.0.14+9-windows-x86_32", + "/jdk/adoptiumjdk-11.0.14.1+1-darwin", + "/jdk/adoptiumjdk-11.0.14.1+1-linux", + "/jdk/adoptiumjdk-11.0.14.1+1-linux-aarch64", + "/jdk/adoptiumjdk-11.0.14.1+1-windows", + "/jdk/adoptiumjdk-11.0.14.1+1-windows-x86_32", + "/jdk/adoptiumjdk-11.0.15+10-darwin", + "/jdk/adoptiumjdk-11.0.15+10-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.15+10-linux", + "/jdk/adoptiumjdk-11.0.15+10-linux-aarch64", + "/jdk/adoptiumjdk-11.0.15+10-windows", + "/jdk/adoptiumjdk-11.0.15+10-windows-x86_32", + "/jdk/adoptiumjdk-11.0.16+8-darwin", + "/jdk/adoptiumjdk-11.0.16+8-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.16+8-linux", + "/jdk/adoptiumjdk-11.0.16+8-linux-aarch64", + "/jdk/adoptiumjdk-11.0.16+8-windows", + "/jdk/adoptiumjdk-11.0.16+8-windows-x86_32", + "/jdk/adoptiumjdk-11.0.16.1+1-darwin", + "/jdk/adoptiumjdk-11.0.16.1+1-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.16.1+1-linux", + "/jdk/adoptiumjdk-11.0.16.1+1-linux-aarch64", + "/jdk/adoptiumjdk-11.0.16.1+1-windows", + "/jdk/adoptiumjdk-11.0.16.1+1-windows-x86_32", + "/jdk/adoptiumjdk-11.0.17+8-darwin", + "/jdk/adoptiumjdk-11.0.17+8-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.17+8-linux", + "/jdk/adoptiumjdk-11.0.17+8-linux-aarch64", + "/jdk/adoptiumjdk-11.0.17+8-windows", + "/jdk/adoptiumjdk-11.0.17+8-windows-x86_32", + "/jdk/adoptiumjdk-11.0.18+10-darwin", + "/jdk/adoptiumjdk-11.0.18+10-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.18+10-linux", + "/jdk/adoptiumjdk-11.0.18+10-linux-aarch64", + "/jdk/adoptiumjdk-11.0.18+10-windows", + "/jdk/adoptiumjdk-11.0.18+10-windows-x86_32", + "/jdk/adoptiumjdk-11.0.19+7-darwin", + "/jdk/adoptiumjdk-11.0.19+7-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.19+7-linux", + "/jdk/adoptiumjdk-11.0.19+7-linux-aarch64", + "/jdk/adoptiumjdk-11.0.19+7-windows", + "/jdk/adoptiumjdk-11.0.19+7-windows-x86_32", + "/jdk/adoptiumjdk-11.0.20+8-darwin", + "/jdk/adoptiumjdk-11.0.20+8-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.20+8-linux", + "/jdk/adoptiumjdk-11.0.20+8-linux-aarch64", + "/jdk/adoptiumjdk-11.0.20+8-windows", + "/jdk/adoptiumjdk-11.0.20+8-windows-x86_32", + "/jdk/adoptiumjdk-11.0.20.1+1-darwin", + "/jdk/adoptiumjdk-11.0.20.1+1-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.20.1+1-linux", + "/jdk/adoptiumjdk-11.0.20.1+1-linux-aarch64", + "/jdk/adoptiumjdk-11.0.20.1+1-windows", + "/jdk/adoptiumjdk-11.0.20.1+1-windows-x86_32", + "/jdk/adoptiumjdk-11.0.21+9-darwin", + "/jdk/adoptiumjdk-11.0.21+9-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.21+9-linux", + "/jdk/adoptiumjdk-11.0.21+9-linux-aarch64", + "/jdk/adoptiumjdk-11.0.21+9-windows", + 
"/jdk/adoptiumjdk-11.0.21+9-windows-x86_32", + "/jdk/adoptiumjdk-11.0.22+7-darwin", + "/jdk/adoptiumjdk-11.0.22+7-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.22+7-linux", + "/jdk/adoptiumjdk-11.0.22+7-linux-aarch64", + "/jdk/adoptiumjdk-11.0.22+7-windows", + "/jdk/adoptiumjdk-11.0.22+7-windows-x86_32", + "/jdk/adoptiumjdk-11.0.23+9-darwin", + "/jdk/adoptiumjdk-11.0.23+9-darwin-aarch64", + "/jdk/adoptiumjdk-11.0.23+9-linux", + "/jdk/adoptiumjdk-11.0.23+9-linux-aarch64", + "/jdk/adoptiumjdk-11.0.23+9-windows", + "/jdk/adoptiumjdk-11.0.23+9-windows-x86_32", + "/jdk/adoptiumjdk-16.0.2+7-darwin", + "/jdk/adoptiumjdk-16.0.2+7-linux", + "/jdk/adoptiumjdk-16.0.2+7-linux-aarch64", + "/jdk/adoptiumjdk-16.0.2+7-windows", + "/jdk/adoptiumjdk-16.0.2+7-windows-x86_32", + "/jdk/adoptiumjdk-17+35-darwin", + "/jdk/adoptiumjdk-17+35-darwin-aarch64", + "/jdk/adoptiumjdk-17+35-linux", + "/jdk/adoptiumjdk-17+35-linux-aarch64", + "/jdk/adoptiumjdk-17+35-windows", + "/jdk/adoptiumjdk-17+35-windows-x86_32", + "/jdk/adoptiumjdk-17.0.1+12-darwin", + "/jdk/adoptiumjdk-17.0.1+12-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.1+12-linux", + "/jdk/adoptiumjdk-17.0.1+12-linux-aarch64", + "/jdk/adoptiumjdk-17.0.1+12-windows", + "/jdk/adoptiumjdk-17.0.1+12-windows-x86_32", + "/jdk/adoptiumjdk-17.0.10+7-darwin", + "/jdk/adoptiumjdk-17.0.10+7-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.10+7-linux", + "/jdk/adoptiumjdk-17.0.10+7-linux-aarch64", + "/jdk/adoptiumjdk-17.0.10+7-windows", + "/jdk/adoptiumjdk-17.0.10+7-windows-x86_32", + "/jdk/adoptiumjdk-17.0.11+9-darwin", + "/jdk/adoptiumjdk-17.0.11+9-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.11+9-linux", + "/jdk/adoptiumjdk-17.0.11+9-linux-aarch64", + "/jdk/adoptiumjdk-17.0.11+9-windows", + "/jdk/adoptiumjdk-17.0.11+9-windows-x86_32", + "/jdk/adoptiumjdk-17.0.2+8-darwin", + "/jdk/adoptiumjdk-17.0.2+8-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.2+8-linux", + "/jdk/adoptiumjdk-17.0.2+8-linux-aarch64", + "/jdk/adoptiumjdk-17.0.2+8-windows", + "/jdk/adoptiumjdk-17.0.2+8-windows-x86_32", + "/jdk/adoptiumjdk-17.0.3+7-darwin", + "/jdk/adoptiumjdk-17.0.3+7-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.3+7-linux", + "/jdk/adoptiumjdk-17.0.3+7-linux-aarch64", + "/jdk/adoptiumjdk-17.0.3+7-windows", + "/jdk/adoptiumjdk-17.0.3+7-windows-x86_32", + "/jdk/adoptiumjdk-17.0.4+8-darwin", + "/jdk/adoptiumjdk-17.0.4+8-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.4+8-linux", + "/jdk/adoptiumjdk-17.0.4+8-linux-aarch64", + "/jdk/adoptiumjdk-17.0.4+8-windows", + "/jdk/adoptiumjdk-17.0.4+8-windows-x86_32", + "/jdk/adoptiumjdk-17.0.4.1+1-darwin", + "/jdk/adoptiumjdk-17.0.4.1+1-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.4.1+1-linux", + "/jdk/adoptiumjdk-17.0.4.1+1-linux-aarch64", + "/jdk/adoptiumjdk-17.0.4.1+1-windows", + "/jdk/adoptiumjdk-17.0.4.1+1-windows-x86_32", + "/jdk/adoptiumjdk-17.0.5+8-darwin", + "/jdk/adoptiumjdk-17.0.5+8-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.5+8-linux", + "/jdk/adoptiumjdk-17.0.5+8-linux-aarch64", + "/jdk/adoptiumjdk-17.0.5+8-windows", + "/jdk/adoptiumjdk-17.0.5+8-windows-x86_32", + "/jdk/adoptiumjdk-17.0.6+10-darwin", + "/jdk/adoptiumjdk-17.0.6+10-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.6+10-linux", + "/jdk/adoptiumjdk-17.0.6+10-linux-aarch64", + "/jdk/adoptiumjdk-17.0.6+10-windows", + "/jdk/adoptiumjdk-17.0.6+10-windows-x86_32", + "/jdk/adoptiumjdk-17.0.7+7-darwin", + "/jdk/adoptiumjdk-17.0.7+7-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.7+7-linux", + "/jdk/adoptiumjdk-17.0.7+7-linux-aarch64", + "/jdk/adoptiumjdk-17.0.7+7-windows", + "/jdk/adoptiumjdk-17.0.7+7-windows-x86_32", + 
"/jdk/adoptiumjdk-17.0.8+7-darwin", + "/jdk/adoptiumjdk-17.0.8+7-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.8+7-linux", + "/jdk/adoptiumjdk-17.0.8+7-linux-aarch64", + "/jdk/adoptiumjdk-17.0.8+7-windows", + "/jdk/adoptiumjdk-17.0.8+7-windows-x86_32", + "/jdk/adoptiumjdk-17.0.8.1+1-darwin", + "/jdk/adoptiumjdk-17.0.8.1+1-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.8.1+1-linux", + "/jdk/adoptiumjdk-17.0.8.1+1-linux-aarch64", + "/jdk/adoptiumjdk-17.0.8.1+1-windows", + "/jdk/adoptiumjdk-17.0.8.1+1-windows-x86_32", + "/jdk/adoptiumjdk-17.0.9+9-darwin", + "/jdk/adoptiumjdk-17.0.9+9-darwin-aarch64", + "/jdk/adoptiumjdk-17.0.9+9-linux", + "/jdk/adoptiumjdk-17.0.9+9-linux-aarch64", + "/jdk/adoptiumjdk-17.0.9+9-windows", + "/jdk/adoptiumjdk-17.0.9+9-windows-x86_32", + "/jdk/adoptiumjdk-21.0.1+12-darwin", + "/jdk/adoptiumjdk-21.0.1+12-darwin-aarch64", + "/jdk/adoptiumjdk-21.0.1+12-linux", + "/jdk/adoptiumjdk-21.0.1+12-linux-aarch64", + "/jdk/adoptiumjdk-21.0.1+12-windows", + "/jdk/adoptiumjdk-21.0.2+13-darwin", + "/jdk/adoptiumjdk-21.0.2+13-darwin-aarch64", + "/jdk/adoptiumjdk-21.0.2+13-linux", + "/jdk/adoptiumjdk-21.0.2+13-linux-aarch64", + "/jdk/adoptiumjdk-21.0.2+13-windows", + "/jdk/adoptiumjdk-21.0.3+9-darwin", + "/jdk/adoptiumjdk-21.0.3+9-darwin-aarch64", + "/jdk/adoptiumjdk-21.0.3+9-linux", + "/jdk/adoptiumjdk-21.0.3+9-linux-aarch64", + "/jdk/adoptiumjdk-21.0.3+9-windows", + "/jdk/adoptiumjdk-8u302-darwin", + "/jdk/adoptiumjdk-8u302-linux", + "/jdk/adoptiumjdk-8u302-linux-aarch64", + "/jdk/adoptiumjdk-8u302-windows", + "/jdk/adoptiumjdk-8u302-windows-x86_32", + "/jdk/adoptiumjdk-8u312-darwin", + "/jdk/adoptiumjdk-8u312-linux", + "/jdk/adoptiumjdk-8u312-linux-aarch64", + "/jdk/adoptiumjdk-8u312-windows", + "/jdk/adoptiumjdk-8u312-windows-x86_32", + "/jdk/adoptiumjdk-8u322-darwin", + "/jdk/adoptiumjdk-8u322-linux", + "/jdk/adoptiumjdk-8u322-linux-aarch64", + "/jdk/adoptiumjdk-8u322-windows", + "/jdk/adoptiumjdk-8u322-windows-x86_32", + "/jdk/adoptiumjdk-8u332-darwin", + "/jdk/adoptiumjdk-8u332-linux", + "/jdk/adoptiumjdk-8u332-linux-aarch64", + "/jdk/adoptiumjdk-8u332-windows", + "/jdk/adoptiumjdk-8u332-windows-x86_32", + "/jdk/adoptiumjdk-8u342-linux", + "/jdk/adoptiumjdk-8u342-linux-aarch64", + "/jdk/adoptiumjdk-8u342-windows", + "/jdk/adoptiumjdk-8u342-windows-x86_32", + "/jdk/adoptiumjdk-8u345-darwin", + "/jdk/adoptiumjdk-8u345-linux", + "/jdk/adoptiumjdk-8u345-linux-aarch64", + "/jdk/adoptiumjdk-8u345-windows", + "/jdk/adoptiumjdk-8u345-windows-x86_32", + "/jdk/adoptiumjdk-8u352-darwin", + "/jdk/adoptiumjdk-8u352-linux", + "/jdk/adoptiumjdk-8u352-linux-aarch64", + "/jdk/adoptiumjdk-8u352-windows", + "/jdk/adoptiumjdk-8u352-windows-x86_32", + "/jdk/adoptiumjdk-8u362-darwin", + "/jdk/adoptiumjdk-8u362-linux", + "/jdk/adoptiumjdk-8u362-linux-aarch64", + "/jdk/adoptiumjdk-8u362-windows", + "/jdk/adoptiumjdk-8u362-windows-x86_32", + "/jdk/adoptiumjdk-8u372-darwin", + "/jdk/adoptiumjdk-8u372-linux", + "/jdk/adoptiumjdk-8u372-linux-aarch64", + "/jdk/adoptiumjdk-8u372-windows", + "/jdk/adoptiumjdk-8u372-windows-x86_32", + "/jdk/adoptiumjdk-8u382-darwin", + "/jdk/adoptiumjdk-8u382-linux", + "/jdk/adoptiumjdk-8u382-linux-aarch64", + "/jdk/adoptiumjdk-8u382-windows", + "/jdk/adoptiumjdk-8u382-windows-x86_32", + "/jdk/adoptiumjdk-8u392-darwin", + "/jdk/adoptiumjdk-8u392-linux", + "/jdk/adoptiumjdk-8u392-linux-aarch64", + "/jdk/adoptiumjdk-8u392-windows", + "/jdk/adoptiumjdk-8u392-windows-x86_32", + "/jdk/adoptiumjdk-8u402-darwin", + "/jdk/adoptiumjdk-8u402-linux", + "/jdk/adoptiumjdk-8u402-linux-aarch64", 
+ "/jdk/adoptiumjdk-8u402-windows", + "/jdk/adoptiumjdk-8u402-windows-x86_32", + "/jdk/adoptiumjdk-8u412-darwin", + "/jdk/adoptiumjdk-8u412-linux", + "/jdk/adoptiumjdk-8u412-linux-aarch64", + "/jdk/adoptiumjdk-8u412-windows", + "/jdk/adoptiumjdk-8u412-windows-x86_32", + "/jdk/adoptopenjdk-11+28-darwin", + "/jdk/adoptopenjdk-11+28-linux", + "/jdk/adoptopenjdk-11+28-windows", + "/jdk/adoptopenjdk-11.0.1+13-darwin", + "/jdk/adoptopenjdk-11.0.1+13-linux", + "/jdk/adoptopenjdk-11.0.1+13-linux-aarch64", + "/jdk/adoptopenjdk-11.0.1+13-windows", + "/jdk/adoptopenjdk-11.0.10+9-darwin", + "/jdk/adoptopenjdk-11.0.10+9-linux", + "/jdk/adoptopenjdk-11.0.10+9-linux-aarch64", + "/jdk/adoptopenjdk-11.0.10+9-windows", + "/jdk/adoptopenjdk-11.0.10+9-windows-x86_32", + "/jdk/adoptopenjdk-11.0.11+9-darwin", + "/jdk/adoptopenjdk-11.0.11+9-linux", + "/jdk/adoptopenjdk-11.0.11+9-linux-aarch64", + "/jdk/adoptopenjdk-11.0.11+9-windows", + "/jdk/adoptopenjdk-11.0.11+9-windows-x86_32", + "/jdk/adoptopenjdk-11.0.2+7-darwin", + "/jdk/adoptopenjdk-11.0.2+7-linux", + "/jdk/adoptopenjdk-11.0.2+7-linux-aarch64", + "/jdk/adoptopenjdk-11.0.2+7-windows", + "/jdk/adoptopenjdk-11.0.2+9-darwin", + "/jdk/adoptopenjdk-11.0.2+9-linux", + "/jdk/adoptopenjdk-11.0.2+9-linux-aarch64", + "/jdk/adoptopenjdk-11.0.2+9-windows", + "/jdk/adoptopenjdk-11.0.3+7-darwin", + "/jdk/adoptopenjdk-11.0.3+7-linux", + "/jdk/adoptopenjdk-11.0.3+7-linux-aarch64", + "/jdk/adoptopenjdk-11.0.3+7-windows", + "/jdk/adoptopenjdk-11.0.4+11-darwin", + "/jdk/adoptopenjdk-11.0.4+11-linux", + "/jdk/adoptopenjdk-11.0.4+11-linux-aarch64", + "/jdk/adoptopenjdk-11.0.4+11-windows", + "/jdk/adoptopenjdk-11.0.5+10-darwin", + "/jdk/adoptopenjdk-11.0.5+10-linux", + "/jdk/adoptopenjdk-11.0.5+10-windows", + "/jdk/adoptopenjdk-11.0.6+10-darwin", + "/jdk/adoptopenjdk-11.0.6+10-linux", + "/jdk/adoptopenjdk-11.0.6+10-linux-aarch64", + "/jdk/adoptopenjdk-11.0.6+10-windows", + "/jdk/adoptopenjdk-11.0.7+10-darwin", + "/jdk/adoptopenjdk-11.0.7+10-linux", + "/jdk/adoptopenjdk-11.0.7+10-linux-aarch64", + "/jdk/adoptopenjdk-11.0.7+10-windows", + "/jdk/adoptopenjdk-11.0.8+10-darwin", + "/jdk/adoptopenjdk-11.0.8+10-linux", + "/jdk/adoptopenjdk-11.0.8+10-linux-aarch64", + "/jdk/adoptopenjdk-11.0.8+10-windows", + "/jdk/adoptopenjdk-11.0.8+10-windows-x86_32", + "/jdk/adoptopenjdk-11.0.9+11-darwin", + "/jdk/adoptopenjdk-11.0.9+11-linux", + "/jdk/adoptopenjdk-11.0.9+11-linux-aarch64", + "/jdk/adoptopenjdk-11.0.9+11-windows", + "/jdk/adoptopenjdk-11.0.9+11-windows-x86_32", + "/jdk/adoptopenjdk-11.0.9.1+1-darwin", + "/jdk/adoptopenjdk-11.0.9.1+1-linux", + "/jdk/adoptopenjdk-11.0.9.1+1-linux-aarch64", + "/jdk/adoptopenjdk-11.0.9.1+1-windows", + "/jdk/adoptopenjdk-11.0.9.1+1-windows-x86_32", + "/jdk/adoptopenjdk-12+33-darwin", + "/jdk/adoptopenjdk-12+33-linux", + "/jdk/adoptopenjdk-12+33-windows", + "/jdk/adoptopenjdk-12.0.1+12-darwin", + "/jdk/adoptopenjdk-12.0.1+12-linux", + "/jdk/adoptopenjdk-12.0.1+12-linux-aarch64", + "/jdk/adoptopenjdk-12.0.1+12-windows", + "/jdk/adoptopenjdk-12.0.2+10-darwin", + "/jdk/adoptopenjdk-12.0.2+10-linux", + "/jdk/adoptopenjdk-12.0.2+10-linux-aarch64", + "/jdk/adoptopenjdk-12.0.2+10-windows", + "/jdk/adoptopenjdk-12.0.2+10-windows-x86_32", + "/jdk/adoptopenjdk-13.0.1+9-darwin", + "/jdk/adoptopenjdk-13.0.1+9-linux", + "/jdk/adoptopenjdk-13.0.1+9-windows", + "/jdk/adoptopenjdk-13.0.2+8-darwin", + "/jdk/adoptopenjdk-13.0.2+8-linux", + "/jdk/adoptopenjdk-13.0.2+8-linux-aarch64", + "/jdk/adoptopenjdk-13.0.2+8-windows", + "/jdk/adoptopenjdk-13.0.2+8-windows-x86_32", + 
"/jdk/adoptopenjdk-14.0.1+7-darwin", + "/jdk/adoptopenjdk-14.0.1+7-linux", + "/jdk/adoptopenjdk-14.0.1+7-linux-aarch64", + "/jdk/adoptopenjdk-14.0.1+7-windows", + "/jdk/adoptopenjdk-14.0.2+12-darwin", + "/jdk/adoptopenjdk-14.0.2+12-linux", + "/jdk/adoptopenjdk-14.0.2+12-linux-aarch64", + "/jdk/adoptopenjdk-14.0.2+12-windows", + "/jdk/adoptopenjdk-14.0.2+12-windows-x86_32", + "/jdk/adoptopenjdk-15+36-darwin", + "/jdk/adoptopenjdk-15+36-linux", + "/jdk/adoptopenjdk-15+36-linux-aarch64", + "/jdk/adoptopenjdk-15+36-windows", + "/jdk/adoptopenjdk-15+36-windows-x86_32", + "/jdk/adoptopenjdk-15.0.1+9-darwin", + "/jdk/adoptopenjdk-15.0.1+9-linux", + "/jdk/adoptopenjdk-15.0.1+9-linux-aarch64", + "/jdk/adoptopenjdk-15.0.1+9-windows", + "/jdk/adoptopenjdk-15.0.1+9-windows-x86_32", + "/jdk/adoptopenjdk-15.0.2+7-darwin", + "/jdk/adoptopenjdk-15.0.2+7-linux", + "/jdk/adoptopenjdk-15.0.2+7-linux-aarch64", + "/jdk/adoptopenjdk-15.0.2+7-windows", + "/jdk/adoptopenjdk-15.0.2+7-windows-x86_32", + "/jdk/adoptopenjdk-16+36-darwin", + "/jdk/adoptopenjdk-16+36-linux", + "/jdk/adoptopenjdk-16+36-linux-aarch64", + "/jdk/adoptopenjdk-16+36-windows", + "/jdk/adoptopenjdk-16+36-windows-x86_32", + "/jdk/adoptopenjdk-16.0.1+9-darwin", + "/jdk/adoptopenjdk-16.0.1+9-linux", + "/jdk/adoptopenjdk-16.0.1+9-linux-aarch64", + "/jdk/adoptopenjdk-16.0.1+9-windows", + "/jdk/adoptopenjdk-16.0.1+9-windows-x86_32", + "/jdk/adoptopenjdk-8u181-darwin", + "/jdk/adoptopenjdk-8u181-linux", + "/jdk/adoptopenjdk-8u181-windows", + "/jdk/adoptopenjdk-8u191-linux-aarch64", + "/jdk/adoptopenjdk-8u192-darwin", + "/jdk/adoptopenjdk-8u192-linux", + "/jdk/adoptopenjdk-8u192-windows", + "/jdk/adoptopenjdk-8u202-darwin", + "/jdk/adoptopenjdk-8u202-linux", + "/jdk/adoptopenjdk-8u202-windows", + "/jdk/adoptopenjdk-8u212-darwin", + "/jdk/adoptopenjdk-8u212-linux", + "/jdk/adoptopenjdk-8u212-windows", + "/jdk/adoptopenjdk-8u222-darwin", + "/jdk/adoptopenjdk-8u222-linux", + "/jdk/adoptopenjdk-8u222-linux-aarch64", + "/jdk/adoptopenjdk-8u222-windows", + "/jdk/adoptopenjdk-8u232-darwin", + "/jdk/adoptopenjdk-8u232-linux", + "/jdk/adoptopenjdk-8u232-linux-aarch64", + "/jdk/adoptopenjdk-8u232-windows", + "/jdk/adoptopenjdk-8u242-darwin", + "/jdk/adoptopenjdk-8u242-linux", + "/jdk/adoptopenjdk-8u242-windows", + "/jdk/adoptopenjdk-8u252-darwin", + "/jdk/adoptopenjdk-8u252-linux", + "/jdk/adoptopenjdk-8u252-linux-aarch64", + "/jdk/adoptopenjdk-8u252-windows", + "/jdk/adoptopenjdk-8u262-darwin", + "/jdk/adoptopenjdk-8u262-linux", + "/jdk/adoptopenjdk-8u262-linux-aarch64", + "/jdk/adoptopenjdk-8u262-windows", + "/jdk/adoptopenjdk-8u265-darwin", + "/jdk/adoptopenjdk-8u265-linux", + "/jdk/adoptopenjdk-8u265-linux-aarch64", + "/jdk/adoptopenjdk-8u265-windows", + "/jdk/adoptopenjdk-8u265-windows-x86_32", + "/jdk/adoptopenjdk-8u272-darwin", + "/jdk/adoptopenjdk-8u272-linux", + "/jdk/adoptopenjdk-8u272-linux-aarch64", + "/jdk/adoptopenjdk-8u272-windows", + "/jdk/adoptopenjdk-8u272-windows-x86_32", + "/jdk/adoptopenjdk-8u275-darwin", + "/jdk/adoptopenjdk-8u275-linux", + "/jdk/adoptopenjdk-8u275-linux-aarch64", + "/jdk/adoptopenjdk-8u275-windows", + "/jdk/adoptopenjdk-8u275-windows-x86_32", + "/jdk/adoptopenjdk-8u282-darwin", + "/jdk/adoptopenjdk-8u282-linux", + "/jdk/adoptopenjdk-8u282-windows", + "/jdk/adoptopenjdk-8u282-windows-x86_32", + "/jdk/adoptopenjdk-8u292-darwin", + "/jdk/adoptopenjdk-8u292-linux", + "/jdk/adoptopenjdk-8u292-linux-aarch64", + "/jdk/adoptopenjdk-8u292-windows", + "/jdk/adoptopenjdk-8u292-windows-x86_32", + 
"/jdk/adoptopenjdk-openj9-11.0.12+7-linux", + "/jdk/adoptopenjdk-openj9-11.0.12+7-windows", + "/jdk/adoptopenjdk-openj9-11.0.13+8-linux", + "/jdk/adoptopenjdk-openj9-11.0.13+8-windows", + "/jdk/adoptopenjdk-openj9-11.0.14+9-linux", + "/jdk/adoptopenjdk-openj9-11.0.14+9-windows", + "/jdk/adoptopenjdk-openj9-11.0.14.1+1-linux", + "/jdk/adoptopenjdk-openj9-11.0.14.1+1-windows", + "/jdk/adoptopenjdk-openj9-11.0.15+10-linux", + "/jdk/adoptopenjdk-openj9-11.0.15+10-windows", + "/jdk/adoptopenjdk-openj9-11.0.16+8-linux", + "/jdk/adoptopenjdk-openj9-11.0.16+8-windows", + "/jdk/adoptopenjdk-openj9-11.0.16.1+1-linux", + "/jdk/adoptopenjdk-openj9-11.0.16.1+1-windows", + "/jdk/adoptopenjdk-openj9-11.0.17+8-linux", + "/jdk/adoptopenjdk-openj9-11.0.17+8-windows", + "/jdk/adoptopenjdk-openj9-11.0.18+10-linux", + "/jdk/adoptopenjdk-openj9-11.0.18+10-windows", + "/jdk/adoptopenjdk-openj9-11.0.19+7-linux", + "/jdk/adoptopenjdk-openj9-11.0.19+7-windows", + "/jdk/adoptopenjdk-openj9-11.0.20+8-linux", + "/jdk/adoptopenjdk-openj9-11.0.20+8-windows", + "/jdk/adoptopenjdk-openj9-11.0.20.1+1-linux", + "/jdk/adoptopenjdk-openj9-11.0.20.1+1-windows", + "/jdk/adoptopenjdk-openj9-11.0.21+9-linux", + "/jdk/adoptopenjdk-openj9-11.0.21+9-windows", + "/jdk/adoptopenjdk-openj9-11.0.22+7-linux", + "/jdk/adoptopenjdk-openj9-11.0.22+7-windows", + "/jdk/adoptopenjdk-openj9-11.0.23+9-linux", + "/jdk/adoptopenjdk-openj9-11.0.23+9-windows", + "/jdk/adoptopenjdk-openj9-8u302-linux", + "/jdk/adoptopenjdk-openj9-8u302-windows", + "/jdk/adoptopenjdk-openj9-8u312-linux", + "/jdk/adoptopenjdk-openj9-8u312-windows", + "/jdk/adoptopenjdk-openj9-8u322-linux", + "/jdk/adoptopenjdk-openj9-8u322-windows", + "/jdk/adoptopenjdk-openj9-8u332-linux", + "/jdk/adoptopenjdk-openj9-8u332-windows", + "/jdk/adoptopenjdk-openj9-8u345-linux", + "/jdk/adoptopenjdk-openj9-8u345-windows", + "/jdk/adoptopenjdk-openj9-8u352-linux", + "/jdk/adoptopenjdk-openj9-8u352-windows", + "/jdk/adoptopenjdk-openj9-8u362-linux", + "/jdk/adoptopenjdk-openj9-8u362-windows", + "/jdk/adoptopenjdk-openj9-8u372-linux", + "/jdk/adoptopenjdk-openj9-8u372-windows", + "/jdk/adoptopenjdk-openj9-8u382-linux", + "/jdk/adoptopenjdk-openj9-8u382-windows", + "/jdk/adoptopenjdk-openj9-8u392-linux", + "/jdk/adoptopenjdk-openj9-8u392-windows", + "/jdk/adoptopenjdk-openj9-8u402-linux", + "/jdk/adoptopenjdk-openj9-8u402-windows", + "/jdk/adoptopenjdk-openj9-8u412-linux", + "/jdk/adoptopenjdk-openj9-8u412-windows", + "/jdk/amazon-corretto-11.0.10.9.1-darwin", + "/jdk/amazon-corretto-11.0.10.9.1-linux", + "/jdk/amazon-corretto-11.0.10.9.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.10.9.1-windows", + "/jdk/amazon-corretto-11.0.11.9.1-darwin", + "/jdk/amazon-corretto-11.0.11.9.1-linux", + "/jdk/amazon-corretto-11.0.11.9.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.11.9.1-windows", + "/jdk/amazon-corretto-11.0.12.7.1-linux", + "/jdk/amazon-corretto-11.0.12.7.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.12.7.1-windows", + "/jdk/amazon-corretto-11.0.12.7.2-darwin", + "/jdk/amazon-corretto-11.0.13.8.1-darwin", + "/jdk/amazon-corretto-11.0.13.8.1-linux", + "/jdk/amazon-corretto-11.0.13.8.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.13.8.1-windows", + "/jdk/amazon-corretto-11.0.14.10.1-darwin", + "/jdk/amazon-corretto-11.0.14.10.1-linux", + "/jdk/amazon-corretto-11.0.14.10.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.14.10.1-windows", + "/jdk/amazon-corretto-11.0.14.9.1-darwin", + "/jdk/amazon-corretto-11.0.14.9.1-linux", + "/jdk/amazon-corretto-11.0.14.9.1-linux-aarch64", + 
"/jdk/amazon-corretto-11.0.14.9.1-windows", + "/jdk/amazon-corretto-11.0.15.2.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.15.9.1-darwin", + "/jdk/amazon-corretto-11.0.15.9.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.15.9.1-linux", + "/jdk/amazon-corretto-11.0.15.9.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.15.9.1-windows", + "/jdk/amazon-corretto-11.0.16.8.1-darwin", + "/jdk/amazon-corretto-11.0.16.8.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.16.8.1-linux", + "/jdk/amazon-corretto-11.0.16.8.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.16.8.1-windows", + "/jdk/amazon-corretto-11.0.16.8.3-darwin", + "/jdk/amazon-corretto-11.0.16.8.3-darwin-aarch64", + "/jdk/amazon-corretto-11.0.16.9.1-darwin", + "/jdk/amazon-corretto-11.0.16.9.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.16.9.1-linux", + "/jdk/amazon-corretto-11.0.16.9.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.16.9.1-windows", + "/jdk/amazon-corretto-11.0.17.8.1-darwin", + "/jdk/amazon-corretto-11.0.17.8.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.17.8.1-linux", + "/jdk/amazon-corretto-11.0.17.8.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.17.8.1-windows", + "/jdk/amazon-corretto-11.0.18.10.1-darwin", + "/jdk/amazon-corretto-11.0.18.10.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.18.10.1-linux", + "/jdk/amazon-corretto-11.0.18.10.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.18.10.1-windows", + "/jdk/amazon-corretto-11.0.19.7.1-darwin", + "/jdk/amazon-corretto-11.0.19.7.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.19.7.1-linux", + "/jdk/amazon-corretto-11.0.19.7.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.19.7.1-windows", + "/jdk/amazon-corretto-11.0.20.8.1-darwin", + "/jdk/amazon-corretto-11.0.20.8.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.20.8.1-linux", + "/jdk/amazon-corretto-11.0.20.8.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.20.8.1-windows", + "/jdk/amazon-corretto-11.0.20.9.1-darwin", + "/jdk/amazon-corretto-11.0.20.9.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.20.9.1-linux", + "/jdk/amazon-corretto-11.0.20.9.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.20.9.1-windows", + "/jdk/amazon-corretto-11.0.21.9.1-darwin", + "/jdk/amazon-corretto-11.0.21.9.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.21.9.1-linux", + "/jdk/amazon-corretto-11.0.21.9.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.21.9.1-windows", + "/jdk/amazon-corretto-11.0.22.7.1-darwin", + "/jdk/amazon-corretto-11.0.22.7.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.22.7.1-linux", + "/jdk/amazon-corretto-11.0.22.7.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.22.7.1-windows", + "/jdk/amazon-corretto-11.0.23.9.1-darwin", + "/jdk/amazon-corretto-11.0.23.9.1-darwin-aarch64", + "/jdk/amazon-corretto-11.0.23.9.1-linux", + "/jdk/amazon-corretto-11.0.23.9.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.23.9.1-windows", + "/jdk/amazon-corretto-11.0.3.7.1-darwin", + "/jdk/amazon-corretto-11.0.3.7.1-linux", + "/jdk/amazon-corretto-11.0.3.7.1-windows", + "/jdk/amazon-corretto-11.0.4.11.1-darwin", + "/jdk/amazon-corretto-11.0.4.11.1-linux", + "/jdk/amazon-corretto-11.0.4.11.1-windows", + "/jdk/amazon-corretto-11.0.5.10.1-linux", + "/jdk/amazon-corretto-11.0.5.10.1-windows", + "/jdk/amazon-corretto-11.0.5.10.2-darwin", + "/jdk/amazon-corretto-11.0.7.10.1+1-darwin", + "/jdk/amazon-corretto-11.0.7.10.1-darwin", + "/jdk/amazon-corretto-11.0.7.10.1-linux", + "/jdk/amazon-corretto-11.0.7.10.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.7.10.1-windows", + "/jdk/amazon-corretto-11.0.8.10.1-darwin", + 
"/jdk/amazon-corretto-11.0.8.10.1-linux", + "/jdk/amazon-corretto-11.0.8.10.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.8.10.1-windows", + "/jdk/amazon-corretto-11.0.9.11.1+1-linux", + "/jdk/amazon-corretto-11.0.9.11.1+1-linux-aarch64", + "/jdk/amazon-corretto-11.0.9.11.1-darwin", + "/jdk/amazon-corretto-11.0.9.11.2-windows", + "/jdk/amazon-corretto-11.0.9.12.1-darwin", + "/jdk/amazon-corretto-11.0.9.12.1-linux", + "/jdk/amazon-corretto-11.0.9.12.1-linux-aarch64", + "/jdk/amazon-corretto-11.0.9.12.1-windows", + "/jdk/amazon-corretto-17.0.0.35.1-darwin", + "/jdk/amazon-corretto-17.0.0.35.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.0.35.1-linux", + "/jdk/amazon-corretto-17.0.0.35.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.0.35.1-windows", + "/jdk/amazon-corretto-17.0.0.35.2-darwin", + "/jdk/amazon-corretto-17.0.0.35.2-darwin-aarch64", + "/jdk/amazon-corretto-17.0.1.12.1-darwin", + "/jdk/amazon-corretto-17.0.1.12.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.1.12.1-linux", + "/jdk/amazon-corretto-17.0.1.12.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.1.12.1-windows", + "/jdk/amazon-corretto-17.0.10.7.1-darwin", + "/jdk/amazon-corretto-17.0.10.7.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.10.7.1-linux", + "/jdk/amazon-corretto-17.0.10.7.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.10.7.1-windows", + "/jdk/amazon-corretto-17.0.10.8.1-linux", + "/jdk/amazon-corretto-17.0.10.8.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.11.9.1-darwin", + "/jdk/amazon-corretto-17.0.11.9.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.11.9.1-linux", + "/jdk/amazon-corretto-17.0.11.9.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.11.9.1-windows", + "/jdk/amazon-corretto-17.0.2.8.1-darwin", + "/jdk/amazon-corretto-17.0.2.8.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.2.8.1-linux", + "/jdk/amazon-corretto-17.0.2.8.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.2.8.1-windows", + "/jdk/amazon-corretto-17.0.3.6.1-darwin", + "/jdk/amazon-corretto-17.0.3.6.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.3.6.1-linux", + "/jdk/amazon-corretto-17.0.3.6.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.3.6.1-windows", + "/jdk/amazon-corretto-17.0.4.8.1-darwin", + "/jdk/amazon-corretto-17.0.4.8.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.4.8.1-linux", + "/jdk/amazon-corretto-17.0.4.8.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.4.8.1-windows", + "/jdk/amazon-corretto-17.0.4.9.1-darwin", + "/jdk/amazon-corretto-17.0.4.9.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.4.9.1-linux", + "/jdk/amazon-corretto-17.0.4.9.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.4.9.1-windows", + "/jdk/amazon-corretto-17.0.5.8.1-darwin", + "/jdk/amazon-corretto-17.0.5.8.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.5.8.1-linux", + "/jdk/amazon-corretto-17.0.5.8.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.5.8.1-windows", + "/jdk/amazon-corretto-17.0.6.10.1-darwin", + "/jdk/amazon-corretto-17.0.6.10.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.6.10.1-linux", + "/jdk/amazon-corretto-17.0.6.10.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.6.10.1-windows", + "/jdk/amazon-corretto-17.0.7.7.1-darwin", + "/jdk/amazon-corretto-17.0.7.7.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.7.7.1-linux", + "/jdk/amazon-corretto-17.0.7.7.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.7.7.1-windows", + "/jdk/amazon-corretto-17.0.8.7.1-darwin", + "/jdk/amazon-corretto-17.0.8.7.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.8.7.1-linux", + "/jdk/amazon-corretto-17.0.8.7.1-linux-aarch64", + 
"/jdk/amazon-corretto-17.0.8.7.1-windows", + "/jdk/amazon-corretto-17.0.8.8.1-darwin", + "/jdk/amazon-corretto-17.0.8.8.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.8.8.1-linux", + "/jdk/amazon-corretto-17.0.8.8.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.8.8.1-windows", + "/jdk/amazon-corretto-17.0.9.8.1-darwin", + "/jdk/amazon-corretto-17.0.9.8.1-darwin-aarch64", + "/jdk/amazon-corretto-17.0.9.8.1-linux", + "/jdk/amazon-corretto-17.0.9.8.1-linux-aarch64", + "/jdk/amazon-corretto-17.0.9.8.1-windows", + "/jdk/amazon-corretto-21.0.0.34.1-darwin", + "/jdk/amazon-corretto-21.0.0.34.1-darwin-aarch64", + "/jdk/amazon-corretto-21.0.0.34.1-linux", + "/jdk/amazon-corretto-21.0.0.34.1-linux-aarch64", + "/jdk/amazon-corretto-21.0.0.34.1-windows", + "/jdk/amazon-corretto-21.0.0.35.1-darwin", + "/jdk/amazon-corretto-21.0.0.35.1-darwin-aarch64", + "/jdk/amazon-corretto-21.0.0.35.1-linux", + "/jdk/amazon-corretto-21.0.0.35.1-linux-aarch64", + "/jdk/amazon-corretto-21.0.0.35.1-windows", + "/jdk/amazon-corretto-21.0.1.12.1-darwin", + "/jdk/amazon-corretto-21.0.1.12.1-darwin-aarch64", + "/jdk/amazon-corretto-21.0.1.12.1-linux", + "/jdk/amazon-corretto-21.0.1.12.1-linux-aarch64", + "/jdk/amazon-corretto-21.0.1.12.1-windows", + "/jdk/amazon-corretto-21.0.2.13.1-darwin", + "/jdk/amazon-corretto-21.0.2.13.1-darwin-aarch64", + "/jdk/amazon-corretto-21.0.2.13.1-linux", + "/jdk/amazon-corretto-21.0.2.13.1-linux-aarch64", + "/jdk/amazon-corretto-21.0.2.13.1-windows", + "/jdk/amazon-corretto-21.0.2.14.1-linux", + "/jdk/amazon-corretto-21.0.2.14.1-linux-aarch64", + "/jdk/amazon-corretto-21.0.3.9.1-darwin", + "/jdk/amazon-corretto-21.0.3.9.1-darwin-aarch64", + "/jdk/amazon-corretto-21.0.3.9.1-linux", + "/jdk/amazon-corretto-21.0.3.9.1-linux-aarch64", + "/jdk/amazon-corretto-21.0.3.9.1-windows", + "/jdk/amazon-corretto-8.212.04.1-darwin", + "/jdk/amazon-corretto-8.212.04.1-linux", + "/jdk/amazon-corretto-8.212.04.1-windows", + "/jdk/amazon-corretto-8.212.04.2-darwin", + "/jdk/amazon-corretto-8.212.04.2-linux", + "/jdk/amazon-corretto-8.212.04.2-windows", + "/jdk/amazon-corretto-8.222.10.1-darwin", + "/jdk/amazon-corretto-8.222.10.1-linux", + "/jdk/amazon-corretto-8.222.10.3-windows", + "/jdk/amazon-corretto-8.232.09.1-linux", + "/jdk/amazon-corretto-8.232.09.1-windows", + "/jdk/amazon-corretto-8.232.09.2-darwin", + "/jdk/amazon-corretto-8.272.10.1-darwin", + "/jdk/amazon-corretto-8.272.10.1-linux", + "/jdk/amazon-corretto-8.272.10.1-linux-aarch64", + "/jdk/amazon-corretto-8.272.10.1-windows", + "/jdk/amazon-corretto-8.272.10.3-darwin", + "/jdk/amazon-corretto-8.272.10.3-linux", + "/jdk/amazon-corretto-8.272.10.3-linux-aarch64", + "/jdk/amazon-corretto-8.272.10.3-windows", + "/jdk/amazon-corretto-8.275.01.1-darwin", + "/jdk/amazon-corretto-8.275.01.1-linux", + "/jdk/amazon-corretto-8.275.01.1-linux-aarch64", + "/jdk/amazon-corretto-8.275.01.1-windows", + "/jdk/amazon-corretto-8.282.08.1-darwin", + "/jdk/amazon-corretto-8.282.08.1-linux", + "/jdk/amazon-corretto-8.282.08.1-linux-aarch64", + "/jdk/amazon-corretto-8.282.08.1-windows", + "/jdk/amazon-corretto-8.292.10.1-darwin", + "/jdk/amazon-corretto-8.292.10.1-linux", + "/jdk/amazon-corretto-8.292.10.1-linux-aarch64", + "/jdk/amazon-corretto-8.292.10.1-windows", + "/jdk/amazon-corretto-8.292.10.2-linux-aarch64", + "/jdk/amazon-corretto-8.302.08.1-darwin", + "/jdk/amazon-corretto-8.302.08.1-linux", + "/jdk/amazon-corretto-8.302.08.1-linux-aarch64", + "/jdk/amazon-corretto-8.302.08.1-windows", + "/jdk/amazon-corretto-8.312.07.1-darwin", + 
"/jdk/amazon-corretto-8.312.07.1-linux", + "/jdk/amazon-corretto-8.312.07.1-linux-aarch64", + "/jdk/amazon-corretto-8.312.07.1-windows", + "/jdk/amazon-corretto-8.322.06.1-darwin", + "/jdk/amazon-corretto-8.322.06.1-linux", + "/jdk/amazon-corretto-8.322.06.1-linux-aarch64", + "/jdk/amazon-corretto-8.322.06.1-windows", + "/jdk/amazon-corretto-8.322.06.2-linux", + "/jdk/amazon-corretto-8.322.06.2-linux-aarch64", + "/jdk/amazon-corretto-8.322.06.4-darwin-aarch64", + "/jdk/amazon-corretto-8.332.08.1-darwin", + "/jdk/amazon-corretto-8.332.08.1-darwin-aarch64", + "/jdk/amazon-corretto-8.332.08.1-linux", + "/jdk/amazon-corretto-8.332.08.1-linux-aarch64", + "/jdk/amazon-corretto-8.332.08.1-windows", + "/jdk/amazon-corretto-8.342.07.1-darwin", + "/jdk/amazon-corretto-8.342.07.1-darwin-aarch64", + "/jdk/amazon-corretto-8.342.07.1-linux", + "/jdk/amazon-corretto-8.342.07.1-linux-aarch64", + "/jdk/amazon-corretto-8.342.07.1-windows", + "/jdk/amazon-corretto-8.342.07.3-darwin", + "/jdk/amazon-corretto-8.342.07.3-darwin-aarch64", + "/jdk/amazon-corretto-8.342.07.3-linux", + "/jdk/amazon-corretto-8.342.07.3-linux-aarch64", + "/jdk/amazon-corretto-8.342.07.3-windows", + "/jdk/amazon-corretto-8.342.07.4-linux", + "/jdk/amazon-corretto-8.342.07.4-linux-aarch64", + "/jdk/amazon-corretto-8.352.08.1-darwin", + "/jdk/amazon-corretto-8.352.08.1-darwin-aarch64", + "/jdk/amazon-corretto-8.352.08.1-linux", + "/jdk/amazon-corretto-8.352.08.1-linux-aarch64", + "/jdk/amazon-corretto-8.352.08.1-windows", + "/jdk/amazon-corretto-8.362.08.1-darwin", + "/jdk/amazon-corretto-8.362.08.1-darwin-aarch64", + "/jdk/amazon-corretto-8.362.08.1-linux", + "/jdk/amazon-corretto-8.362.08.1-linux-aarch64", + "/jdk/amazon-corretto-8.362.08.1-windows", + "/jdk/amazon-corretto-8.372.07.1-darwin", + "/jdk/amazon-corretto-8.372.07.1-darwin-aarch64", + "/jdk/amazon-corretto-8.372.07.1-linux", + "/jdk/amazon-corretto-8.372.07.1-linux-aarch64", + "/jdk/amazon-corretto-8.372.07.1-windows", + "/jdk/amazon-corretto-8.382.05.1-darwin", + "/jdk/amazon-corretto-8.382.05.1-darwin-aarch64", + "/jdk/amazon-corretto-8.382.05.1-linux", + "/jdk/amazon-corretto-8.382.05.1-linux-aarch64", + "/jdk/amazon-corretto-8.382.05.1-windows", + "/jdk/amazon-corretto-8.392.08.1-darwin", + "/jdk/amazon-corretto-8.392.08.1-darwin-aarch64", + "/jdk/amazon-corretto-8.392.08.1-linux", + "/jdk/amazon-corretto-8.392.08.1-linux-aarch64", + "/jdk/amazon-corretto-8.392.08.1-windows", + "/jdk/amazon-corretto-8.402.06.1-linux", + "/jdk/amazon-corretto-8.402.06.1-linux-aarch64", + "/jdk/amazon-corretto-8.402.06.1-windows", + "/jdk/amazon-corretto-8.402.07.1-darwin", + "/jdk/amazon-corretto-8.402.07.1-darwin-aarch64", + "/jdk/amazon-corretto-8.402.08.1-darwin", + "/jdk/amazon-corretto-8.402.08.1-darwin-aarch64", + "/jdk/amazon-corretto-8.402.08.1-linux", + "/jdk/amazon-corretto-8.402.08.1-linux-aarch64", + "/jdk/amazon-corretto-8.402.08.1-windows", + "/jdk/amazon-corretto-8.412.08.1-darwin", + "/jdk/amazon-corretto-8.412.08.1-darwin-aarch64", + "/jdk/amazon-corretto-8.412.08.1-linux", + "/jdk/amazon-corretto-8.412.08.1-linux-aarch64", + "/jdk/amazon-corretto-8.412.08.1-windows", + "/jdk/graalvm-ce-11-19.3.0-darwin", + "/jdk/graalvm-ce-11-19.3.0-linux", + "/jdk/graalvm-ce-11-19.3.0-linux-aarch64", + "/jdk/graalvm-ce-11-19.3.0-windows", + "/jdk/graalvm-ce-11-19.3.0.2-darwin", + "/jdk/graalvm-ce-11-19.3.0.2-linux", + "/jdk/graalvm-ce-11-19.3.0.2-linux-aarch64", + "/jdk/graalvm-ce-11-19.3.0.2-windows", + "/jdk/graalvm-ce-11-19.3.1-darwin", + "/jdk/graalvm-ce-11-19.3.1-linux", + 
"/jdk/graalvm-ce-11-19.3.1-linux-aarch64", + "/jdk/graalvm-ce-11-19.3.1-windows", + "/jdk/graalvm-ce-11-19.3.2-darwin", + "/jdk/graalvm-ce-11-19.3.2-linux", + "/jdk/graalvm-ce-11-19.3.2-linux-aarch64", + "/jdk/graalvm-ce-11-19.3.2-windows", + "/jdk/graalvm-ce-11-19.3.3-darwin", + "/jdk/graalvm-ce-11-19.3.3-linux", + "/jdk/graalvm-ce-11-19.3.3-linux-aarch64", + "/jdk/graalvm-ce-11-19.3.3-windows", + "/jdk/graalvm-ce-11-19.3.4-darwin", + "/jdk/graalvm-ce-11-19.3.4-linux", + "/jdk/graalvm-ce-11-19.3.4-linux-aarch64", + "/jdk/graalvm-ce-11-19.3.4-windows", + "/jdk/graalvm-ce-11-19.3.5-darwin", + "/jdk/graalvm-ce-11-19.3.5-linux", + "/jdk/graalvm-ce-11-19.3.5-linux-aarch64", + "/jdk/graalvm-ce-11-19.3.5-windows", + "/jdk/graalvm-ce-11-19.3.6-darwin", + "/jdk/graalvm-ce-11-19.3.6-linux", + "/jdk/graalvm-ce-11-19.3.6-windows", + "/jdk/graalvm-ce-11-20.0.0-darwin", + "/jdk/graalvm-ce-11-20.0.0-linux", + "/jdk/graalvm-ce-11-20.0.0-linux-aarch64", + "/jdk/graalvm-ce-11-20.0.0-windows", + "/jdk/graalvm-ce-11-20.1.0-darwin", + "/jdk/graalvm-ce-11-20.1.0-linux", + "/jdk/graalvm-ce-11-20.1.0-linux-aarch64", + "/jdk/graalvm-ce-11-20.1.0-windows", + "/jdk/graalvm-ce-11-20.2.0-darwin", + "/jdk/graalvm-ce-11-20.2.0-linux", + "/jdk/graalvm-ce-11-20.2.0-linux-aarch64", + "/jdk/graalvm-ce-11-20.2.0-windows", + "/jdk/graalvm-ce-11-20.3.0-darwin", + "/jdk/graalvm-ce-11-20.3.0-linux", + "/jdk/graalvm-ce-11-20.3.0-linux-aarch64", + "/jdk/graalvm-ce-11-20.3.0-windows", + "/jdk/graalvm-ce-11-20.3.1-darwin", + "/jdk/graalvm-ce-11-20.3.1-linux", + "/jdk/graalvm-ce-11-20.3.1-linux-aarch64", + "/jdk/graalvm-ce-11-20.3.1-windows", + "/jdk/graalvm-ce-11-20.3.1.2-darwin", + "/jdk/graalvm-ce-11-20.3.1.2-linux", + "/jdk/graalvm-ce-11-20.3.1.2-linux-aarch64", + "/jdk/graalvm-ce-11-20.3.1.2-windows", + "/jdk/graalvm-ce-11-20.3.2-darwin", + "/jdk/graalvm-ce-11-20.3.2-linux", + "/jdk/graalvm-ce-11-20.3.2-windows", + "/jdk/graalvm-ce-11-20.3.3-darwin", + "/jdk/graalvm-ce-11-20.3.3-linux", + "/jdk/graalvm-ce-11-20.3.3-windows", + "/jdk/graalvm-ce-11-20.3.4-darwin", + "/jdk/graalvm-ce-11-20.3.4-linux", + "/jdk/graalvm-ce-11-20.3.4-windows", + "/jdk/graalvm-ce-11-20.3.5-darwin", + "/jdk/graalvm-ce-11-20.3.5-linux", + "/jdk/graalvm-ce-11-20.3.5-windows", + "/jdk/graalvm-ce-11-20.3.6-darwin", + "/jdk/graalvm-ce-11-20.3.6-linux", + "/jdk/graalvm-ce-11-20.3.6-windows", + "/jdk/graalvm-ce-11-21.0.0-darwin", + "/jdk/graalvm-ce-11-21.0.0-linux", + "/jdk/graalvm-ce-11-21.0.0-linux-aarch64", + "/jdk/graalvm-ce-11-21.0.0-windows", + "/jdk/graalvm-ce-11-21.0.0.2-darwin", + "/jdk/graalvm-ce-11-21.0.0.2-linux", + "/jdk/graalvm-ce-11-21.0.0.2-linux-aarch64", + "/jdk/graalvm-ce-11-21.0.0.2-windows", + "/jdk/graalvm-ce-11-21.1.0-darwin", + "/jdk/graalvm-ce-11-21.1.0-linux", + "/jdk/graalvm-ce-11-21.1.0-linux-aarch64", + "/jdk/graalvm-ce-11-21.1.0-windows", + "/jdk/graalvm-ce-11-21.2.0-darwin", + "/jdk/graalvm-ce-11-21.2.0-linux", + "/jdk/graalvm-ce-11-21.2.0-linux-aarch64", + "/jdk/graalvm-ce-11-21.2.0-windows", + "/jdk/graalvm-ce-11-21.3.0-darwin", + "/jdk/graalvm-ce-11-21.3.0-linux", + "/jdk/graalvm-ce-11-21.3.0-linux-aarch64", + "/jdk/graalvm-ce-11-21.3.0-windows", + "/jdk/graalvm-ce-11-21.3.1-darwin", + "/jdk/graalvm-ce-11-21.3.1-linux", + "/jdk/graalvm-ce-11-21.3.1-linux-aarch64", + "/jdk/graalvm-ce-11-21.3.1-windows", + "/jdk/graalvm-ce-11-21.3.2-darwin", + "/jdk/graalvm-ce-11-21.3.2-linux", + "/jdk/graalvm-ce-11-21.3.2-linux-aarch64", + "/jdk/graalvm-ce-11-21.3.2-windows", + "/jdk/graalvm-ce-11-21.3.3-darwin", + 
"/jdk/graalvm-ce-11-21.3.3-linux", + "/jdk/graalvm-ce-11-21.3.3-linux-aarch64", + "/jdk/graalvm-ce-11-21.3.3-windows", + "/jdk/graalvm-ce-11-21.3.3.1-darwin", + "/jdk/graalvm-ce-11-21.3.3.1-linux", + "/jdk/graalvm-ce-11-21.3.3.1-linux-aarch64", + "/jdk/graalvm-ce-11-21.3.3.1-windows", + "/jdk/graalvm-ce-11-22.0.0.2-darwin", + "/jdk/graalvm-ce-11-22.0.0.2-linux", + "/jdk/graalvm-ce-11-22.0.0.2-linux-aarch64", + "/jdk/graalvm-ce-11-22.0.0.2-windows", + "/jdk/graalvm-ce-11-22.1.0-darwin", + "/jdk/graalvm-ce-11-22.1.0-darwin-aarch64", + "/jdk/graalvm-ce-11-22.1.0-linux", + "/jdk/graalvm-ce-11-22.1.0-linux-aarch64", + "/jdk/graalvm-ce-11-22.1.0-windows", + "/jdk/graalvm-ce-11-22.2.0-darwin", + "/jdk/graalvm-ce-11-22.2.0-darwin-aarch64", + "/jdk/graalvm-ce-11-22.2.0-linux", + "/jdk/graalvm-ce-11-22.2.0-linux-aarch64", + "/jdk/graalvm-ce-11-22.2.0-windows", + "/jdk/graalvm-ce-11-22.3.0-darwin", + "/jdk/graalvm-ce-11-22.3.0-darwin-aarch64", + "/jdk/graalvm-ce-11-22.3.0-linux", + "/jdk/graalvm-ce-11-22.3.0-linux-aarch64", + "/jdk/graalvm-ce-11-22.3.0-windows", + "/jdk/graalvm-ce-11-22.3.1-darwin", + "/jdk/graalvm-ce-11-22.3.1-darwin-aarch64", + "/jdk/graalvm-ce-11-22.3.1-linux", + "/jdk/graalvm-ce-11-22.3.1-linux-aarch64", + "/jdk/graalvm-ce-11-22.3.1-windows", + "/jdk/graalvm-ce-11-22.3.2-darwin", + "/jdk/graalvm-ce-11-22.3.2-linux", + "/jdk/graalvm-ce-11-22.3.2-linux-aarch64", + "/jdk/graalvm-ce-11-22.3.2-windows", + "/jdk/graalvm-ce-11-22.3.3-darwin", + "/jdk/graalvm-ce-11-22.3.3-linux", + "/jdk/graalvm-ce-11-22.3.3-linux-aarch64", + "/jdk/graalvm-ce-11-22.3.3-windows", + "/jdk/graalvm-ce-17-21.3.0-darwin", + "/jdk/graalvm-ce-17-21.3.0-linux", + "/jdk/graalvm-ce-17-21.3.0-linux-aarch64", + "/jdk/graalvm-ce-17-21.3.0-windows", + "/jdk/graalvm-ce-17-21.3.1-darwin", + "/jdk/graalvm-ce-17-21.3.1-linux", + "/jdk/graalvm-ce-17-21.3.1-linux-aarch64", + "/jdk/graalvm-ce-17-21.3.1-windows", + "/jdk/graalvm-ce-17-21.3.2-darwin", + "/jdk/graalvm-ce-17-21.3.2-linux", + "/jdk/graalvm-ce-17-21.3.2-linux-aarch64", + "/jdk/graalvm-ce-17-21.3.2-windows", + "/jdk/graalvm-ce-17-21.3.3-darwin", + "/jdk/graalvm-ce-17-21.3.3-linux", + "/jdk/graalvm-ce-17-21.3.3-linux-aarch64", + "/jdk/graalvm-ce-17-21.3.3-windows", + "/jdk/graalvm-ce-17-21.3.3.1-darwin", + "/jdk/graalvm-ce-17-21.3.3.1-linux", + "/jdk/graalvm-ce-17-21.3.3.1-linux-aarch64", + "/jdk/graalvm-ce-17-21.3.3.1-windows", + "/jdk/graalvm-ce-17-22.0.0.2-darwin", + "/jdk/graalvm-ce-17-22.0.0.2-linux", + "/jdk/graalvm-ce-17-22.0.0.2-linux-aarch64", + "/jdk/graalvm-ce-17-22.0.0.2-windows", + "/jdk/graalvm-ce-17-22.1.0-darwin", + "/jdk/graalvm-ce-17-22.1.0-darwin-aarch64", + "/jdk/graalvm-ce-17-22.1.0-linux", + "/jdk/graalvm-ce-17-22.1.0-linux-aarch64", + "/jdk/graalvm-ce-17-22.1.0-windows", + "/jdk/graalvm-ce-17-22.2.0-darwin", + "/jdk/graalvm-ce-17-22.2.0-darwin-aarch64", + "/jdk/graalvm-ce-17-22.2.0-linux", + "/jdk/graalvm-ce-17-22.2.0-linux-aarch64", + "/jdk/graalvm-ce-17-22.2.0-windows", + "/jdk/graalvm-ce-17-22.3.0-darwin", + "/jdk/graalvm-ce-17-22.3.0-darwin-aarch64", + "/jdk/graalvm-ce-17-22.3.0-linux", + "/jdk/graalvm-ce-17-22.3.0-linux-aarch64", + "/jdk/graalvm-ce-17-22.3.0-windows", + "/jdk/graalvm-ce-17-22.3.1-darwin", + "/jdk/graalvm-ce-17-22.3.1-darwin-aarch64", + "/jdk/graalvm-ce-17-22.3.1-linux", + "/jdk/graalvm-ce-17-22.3.1-linux-aarch64", + "/jdk/graalvm-ce-17-22.3.1-windows", + "/jdk/graalvm-ce-17-22.3.2-darwin", + "/jdk/graalvm-ce-17-22.3.2-linux", + "/jdk/graalvm-ce-17-22.3.2-linux-aarch64", + "/jdk/graalvm-ce-17-22.3.2-windows", + 
"/jdk/graalvm-ce-17-22.3.3-darwin", + "/jdk/graalvm-ce-17-22.3.3-linux", + "/jdk/graalvm-ce-17-22.3.3-linux-aarch64", + "/jdk/graalvm-ce-17-22.3.3-windows", + "/jdk/ibm-java-8.0-5.6-linux-x86_64", + "/jdk/ibm-java-8.0-6.25-linux-x86_64", + "/jdk/jdk-10-ea+35_darwin-x64", + "/jdk/jdk-10-ea+35_linux-x64", + "/jdk/jdk-10-ea+35_windows-x64", + "/jdk/jdk-10-ea+37_darwin-x64", + "/jdk/jdk-10-ea+37_linux-x64", + "/jdk/jdk-10-ea+37_windows-x64", + "/jdk/jdk-10-ea+42_darwin-x64", + "/jdk/jdk-10-ea+42_linux-x64", + "/jdk/jdk-10-ea+42_windows-x64", + "/jdk/jdk-8u101-linux-x64", + "/jdk/jdk-8u101-windows-x64", + "/jdk/jdk-8u102-linux-x64", + "/jdk/jdk-8u102-windows-x64", + "/jdk/jdk-8u121-linux-x64", + "/jdk/jdk-8u121-windows-x64", + "/jdk/jdk-8u131-linux-x64", + "/jdk/jdk-8u131-windows-x64", + "/jdk/jdk-8u141-darwin-x64", + "/jdk/jdk-8u141-linux-x64", + "/jdk/jdk-8u141-windows-x64", + "/jdk/jdk-8u144-darwin-x64", + "/jdk/jdk-8u144-linux-x64", + "/jdk/jdk-8u144-windows-x64", + "/jdk/jdk-8u151-darwin-x64", + "/jdk/jdk-8u151-linux-x64", + "/jdk/jdk-8u151-windows-x64", + "/jdk/jdk-8u152-darwin-x64", + "/jdk/jdk-8u152-linux-x64", + "/jdk/jdk-8u152-windows-x64", + "/jdk/jdk-8u161-darwin-x64", + "/jdk/jdk-8u161-linux-x64", + "/jdk/jdk-8u161-windows-x64", + "/jdk/jdk-8u162-darwin-x64", + "/jdk/jdk-8u162-linux-x64", + "/jdk/jdk-8u162-windows-x64", + "/jdk/jdk-8u20-linux-x64", + "/jdk/jdk-8u20-windows-x64", + "/jdk/jdk-8u45-linux-x64", + "/jdk/jdk-8u45-windows-x64", + "/jdk/jdk-9-ea+129_linux-x64", + "/jdk/jdk-9-ea+129_windows-x64", + "/jdk/jdk-9-ea+130_linux-x64", + "/jdk/jdk-9-ea+130_windows-x64", + "/jdk/jdk-9-ea+135_linux-x64", + "/jdk/jdk-9-ea+135_windows-x64", + "/jdk/jdk-9-ea+140_linux-x64", + "/jdk/jdk-9-ea+140_windows-x64", + "/jdk/jdk-9-ea+177_linux-x64", + "/jdk/jdk-9-ea+177_windows-x64", + "/jdk/jdk-9-ea+178_linux-x64", + "/jdk/jdk-9-ea+178_windows-x64", + "/jdk/jdk-9-ea+179_darwin-x64", + "/jdk/jdk-9-ea+179_linux-x64", + "/jdk/jdk-9-ea+179_windows-x64", + "/jdk/jdk-9-ea+180_darwin-x64", + "/jdk/jdk-9-ea+180_linux-x64", + "/jdk/jdk-9-ea+180_windows-x64", + "/jdk/jdk-9-ea+181_darwin-x64", + "/jdk/jdk-9-ea+181_linux-x64", + "/jdk/jdk-9-ea+181_windows-x64", + "/jdk/jdk-9.0.1+11_darwin-x64", + "/jdk/jdk-9.0.1+11_linux-x64", + "/jdk/jdk-9.0.1+11_windows-x64", + "/jdk/jdk-9.0.4+11_darwin-x64", + "/jdk/jdk-9.0.4+11_linux-x64", + "/jdk/jdk-9.0.4+11_windows-x64", + "/jdk/openjdk-10+43-darwin", + "/jdk/openjdk-10+43-linux", + "/jdk/openjdk-10+43-windows", + "/jdk/openjdk-10-darwin", + "/jdk/openjdk-10-linux", + "/jdk/openjdk-10-windows", + "/jdk/openjdk-10.0.1-darwin", + "/jdk/openjdk-10.0.1-linux", + "/jdk/openjdk-10.0.1-windows", + "/jdk/openjdk-10.0.2-darwin", + "/jdk/openjdk-10.0.2-linux", + "/jdk/openjdk-10.0.2-windows", + "/jdk/openjdk-11+11-darwin", + "/jdk/openjdk-11+11-linux", + "/jdk/openjdk-11+11-windows", + "/jdk/openjdk-11+12-darwin", + "/jdk/openjdk-11+12-linux", + "/jdk/openjdk-11+12-windows", + "/jdk/openjdk-11+13-darwin", + "/jdk/openjdk-11+13-linux", + "/jdk/openjdk-11+13-windows", + "/jdk/openjdk-11+14-darwin", + "/jdk/openjdk-11+14-linux", + "/jdk/openjdk-11+14-windows", + "/jdk/openjdk-11+15-darwin", + "/jdk/openjdk-11+15-linux", + "/jdk/openjdk-11+15-windows", + "/jdk/openjdk-11+16-darwin", + "/jdk/openjdk-11+16-linux", + "/jdk/openjdk-11+16-windows", + "/jdk/openjdk-11+17-darwin", + "/jdk/openjdk-11+17-linux", + "/jdk/openjdk-11+17-windows", + "/jdk/openjdk-11+18-darwin", + "/jdk/openjdk-11+18-linux", + "/jdk/openjdk-11+18-windows", + "/jdk/openjdk-11+19-darwin", + 
"/jdk/openjdk-11+19-linux", + "/jdk/openjdk-11+19-windows", + "/jdk/openjdk-11+20-darwin", + "/jdk/openjdk-11+20-linux", + "/jdk/openjdk-11+20-windows", + "/jdk/openjdk-11+21-darwin", + "/jdk/openjdk-11+21-linux", + "/jdk/openjdk-11+21-windows", + "/jdk/openjdk-11+22-darwin", + "/jdk/openjdk-11+22-linux", + "/jdk/openjdk-11+22-windows", + "/jdk/openjdk-11+23-darwin", + "/jdk/openjdk-11+23-linux", + "/jdk/openjdk-11+23-windows", + "/jdk/openjdk-11+24-darwin", + "/jdk/openjdk-11+24-linux", + "/jdk/openjdk-11+24-windows", + "/jdk/openjdk-11+25-darwin", + "/jdk/openjdk-11+25-linux", + "/jdk/openjdk-11+25-windows", + "/jdk/openjdk-11+26-darwin", + "/jdk/openjdk-11+26-linux", + "/jdk/openjdk-11+26-windows", + "/jdk/openjdk-11+27-darwin", + "/jdk/openjdk-11+27-linux", + "/jdk/openjdk-11+27-windows", + "/jdk/openjdk-11+28-darwin", + "/jdk/openjdk-11+28-linux", + "/jdk/openjdk-11+28-windows", + "/jdk/openjdk-11+5-darwin", + "/jdk/openjdk-11+5-linux", + "/jdk/openjdk-11+5-windows", + "/jdk/openjdk-11-darwin", + "/jdk/openjdk-11-linux", + "/jdk/openjdk-11-windows", + "/jdk/openjdk-11.0.1-darwin", + "/jdk/openjdk-11.0.1-linux", + "/jdk/openjdk-11.0.1-windows", + "/jdk/openjdk-11.0.2-darwin", + "/jdk/openjdk-11.0.2-linux", + "/jdk/openjdk-11.0.2-windows", + "/jdk/openjdk-12+23-darwin", + "/jdk/openjdk-12+23-linux", + "/jdk/openjdk-12+23-windows", + "/jdk/openjdk-12+24-darwin", + "/jdk/openjdk-12+24-linux", + "/jdk/openjdk-12+24-windows", + "/jdk/openjdk-12+25-darwin", + "/jdk/openjdk-12+25-linux", + "/jdk/openjdk-12+25-windows", + "/jdk/openjdk-12+27-darwin", + "/jdk/openjdk-12+27-linux", + "/jdk/openjdk-12+27-windows", + "/jdk/openjdk-12+28-darwin", + "/jdk/openjdk-12+28-linux", + "/jdk/openjdk-12+28-windows", + "/jdk/openjdk-12+29-darwin", + "/jdk/openjdk-12+29-linux", + "/jdk/openjdk-12+29-windows", + "/jdk/openjdk-12+30-darwin", + "/jdk/openjdk-12+30-linux", + "/jdk/openjdk-12+30-windows", + "/jdk/openjdk-12+31-darwin", + "/jdk/openjdk-12+31-linux", + "/jdk/openjdk-12+31-windows", + "/jdk/openjdk-12+32-darwin", + "/jdk/openjdk-12+32-linux", + "/jdk/openjdk-12+32-windows", + "/jdk/openjdk-12+33-darwin", + "/jdk/openjdk-12+33-linux", + "/jdk/openjdk-12+33-windows", + "/jdk/openjdk-12-darwin", + "/jdk/openjdk-12-linux", + "/jdk/openjdk-12-windows", + "/jdk/openjdk-12.0.1-darwin", + "/jdk/openjdk-12.0.1-linux", + "/jdk/openjdk-12.0.1-windows", + "/jdk/openjdk-12.0.2-darwin", + "/jdk/openjdk-12.0.2-linux", + "/jdk/openjdk-12.0.2-windows", + "/jdk/openjdk-13+14-darwin", + "/jdk/openjdk-13+14-linux", + "/jdk/openjdk-13+14-windows", + "/jdk/openjdk-13+15-darwin", + "/jdk/openjdk-13+15-linux", + "/jdk/openjdk-13+15-windows", + "/jdk/openjdk-13+16-darwin", + "/jdk/openjdk-13+16-linux", + "/jdk/openjdk-13+16-windows", + "/jdk/openjdk-13+17-darwin", + "/jdk/openjdk-13+17-linux", + "/jdk/openjdk-13+17-windows", + "/jdk/openjdk-13+18-darwin", + "/jdk/openjdk-13+18-linux", + "/jdk/openjdk-13+18-windows", + "/jdk/openjdk-13+19-darwin", + "/jdk/openjdk-13+19-linux", + "/jdk/openjdk-13+19-windows", + "/jdk/openjdk-13+20-darwin", + "/jdk/openjdk-13+20-linux", + "/jdk/openjdk-13+20-windows", + "/jdk/openjdk-13+21-darwin", + "/jdk/openjdk-13+21-linux", + "/jdk/openjdk-13+21-windows", + "/jdk/openjdk-13+22-darwin", + "/jdk/openjdk-13+22-linux", + "/jdk/openjdk-13+22-windows", + "/jdk/openjdk-13+23-darwin", + "/jdk/openjdk-13+23-linux", + "/jdk/openjdk-13+23-windows", + "/jdk/openjdk-13+24-darwin", + "/jdk/openjdk-13+24-linux", + "/jdk/openjdk-13+24-windows", + "/jdk/openjdk-13+25-darwin", + "/jdk/openjdk-13+25-linux", + 
"/jdk/openjdk-13+25-windows", + "/jdk/openjdk-13+26-darwin", + "/jdk/openjdk-13+26-linux", + "/jdk/openjdk-13+26-windows", + "/jdk/openjdk-13+27-darwin", + "/jdk/openjdk-13+27-linux", + "/jdk/openjdk-13+27-windows", + "/jdk/openjdk-13+28-darwin", + "/jdk/openjdk-13+28-linux", + "/jdk/openjdk-13+28-windows", + "/jdk/openjdk-13+29-darwin", + "/jdk/openjdk-13+29-linux", + "/jdk/openjdk-13+29-windows", + "/jdk/openjdk-13+30-darwin", + "/jdk/openjdk-13+30-linux", + "/jdk/openjdk-13+30-windows", + "/jdk/openjdk-13+31-darwin", + "/jdk/openjdk-13+31-linux", + "/jdk/openjdk-13+31-windows", + "/jdk/openjdk-13+32-darwin", + "/jdk/openjdk-13+32-linux", + "/jdk/openjdk-13+32-windows", + "/jdk/openjdk-13-darwin", + "/jdk/openjdk-13-linux", + "/jdk/openjdk-13-windows", + "/jdk/openjdk-13.0.1-darwin", + "/jdk/openjdk-13.0.1-linux", + "/jdk/openjdk-13.0.1-windows", + "/jdk/openjdk-13.0.2-darwin", + "/jdk/openjdk-13.0.2-linux", + "/jdk/openjdk-13.0.2-windows", + "/jdk/openjdk-14+10-darwin", + "/jdk/openjdk-14+10-linux", + "/jdk/openjdk-14+10-windows", + "/jdk/openjdk-14+11-darwin", + "/jdk/openjdk-14+11-linux", + "/jdk/openjdk-14+11-windows", + "/jdk/openjdk-14+12-darwin", + "/jdk/openjdk-14+12-linux", + "/jdk/openjdk-14+12-windows", + "/jdk/openjdk-14+13-darwin", + "/jdk/openjdk-14+13-linux", + "/jdk/openjdk-14+13-windows", + "/jdk/openjdk-14+14-darwin", + "/jdk/openjdk-14+14-linux", + "/jdk/openjdk-14+14-windows", + "/jdk/openjdk-14+15-darwin", + "/jdk/openjdk-14+15-linux", + "/jdk/openjdk-14+15-windows", + "/jdk/openjdk-14+16-darwin", + "/jdk/openjdk-14+16-linux", + "/jdk/openjdk-14+16-windows", + "/jdk/openjdk-14+17-darwin", + "/jdk/openjdk-14+17-linux", + "/jdk/openjdk-14+17-windows", + "/jdk/openjdk-14+25-darwin", + "/jdk/openjdk-14+25-linux", + "/jdk/openjdk-14+25-windows", + "/jdk/openjdk-14+26-darwin", + "/jdk/openjdk-14+26-linux", + "/jdk/openjdk-14+26-windows", + "/jdk/openjdk-14+27-darwin", + "/jdk/openjdk-14+27-linux", + "/jdk/openjdk-14+27-windows", + "/jdk/openjdk-14+28-darwin", + "/jdk/openjdk-14+28-linux", + "/jdk/openjdk-14+28-windows", + "/jdk/openjdk-14+30-darwin", + "/jdk/openjdk-14+30-linux", + "/jdk/openjdk-14+30-windows", + "/jdk/openjdk-14+31-darwin", + "/jdk/openjdk-14+31-linux", + "/jdk/openjdk-14+31-windows", + "/jdk/openjdk-14+32-darwin", + "/jdk/openjdk-14+32-linux", + "/jdk/openjdk-14+32-windows", + "/jdk/openjdk-14+33-darwin", + "/jdk/openjdk-14+33-linux", + "/jdk/openjdk-14+33-windows", + "/jdk/openjdk-14+34-darwin", + "/jdk/openjdk-14+34-linux", + "/jdk/openjdk-14+34-windows", + "/jdk/openjdk-14+9-darwin", + "/jdk/openjdk-14+9-linux", + "/jdk/openjdk-14+9-windows", + "/jdk/openjdk-14-darwin", + "/jdk/openjdk-14-linux", + "/jdk/openjdk-14-windows", + "/jdk/openjdk-14.0.1-darwin", + "/jdk/openjdk-14.0.1-linux", + "/jdk/openjdk-14.0.1-windows", + "/jdk/openjdk-14.0.2+12-darwin", + "/jdk/openjdk-14.0.2+12-linux", + "/jdk/openjdk-14.0.2+12-windows", + "/jdk/openjdk-14.0.2-darwin", + "/jdk/openjdk-14.0.2-linux", + "/jdk/openjdk-14.0.2-windows", + "/jdk/openjdk-15+10-darwin", + "/jdk/openjdk-15+10-linux", + "/jdk/openjdk-15+10-windows", + "/jdk/openjdk-15+11-darwin", + "/jdk/openjdk-15+11-linux", + "/jdk/openjdk-15+11-windows", + "/jdk/openjdk-15+12-darwin", + "/jdk/openjdk-15+12-linux", + "/jdk/openjdk-15+12-windows", + "/jdk/openjdk-15+13-darwin", + "/jdk/openjdk-15+13-linux", + "/jdk/openjdk-15+13-windows", + "/jdk/openjdk-15+14-darwin", + "/jdk/openjdk-15+14-linux", + "/jdk/openjdk-15+14-windows", + "/jdk/openjdk-15+15-darwin", + "/jdk/openjdk-15+15-linux", + 
"/jdk/openjdk-15+15-windows", + "/jdk/openjdk-15+16-darwin", + "/jdk/openjdk-15+16-linux", + "/jdk/openjdk-15+16-windows", + "/jdk/openjdk-15+17-darwin", + "/jdk/openjdk-15+17-linux", + "/jdk/openjdk-15+17-windows", + "/jdk/openjdk-15+18-darwin", + "/jdk/openjdk-15+18-linux", + "/jdk/openjdk-15+18-windows", + "/jdk/openjdk-15+19-darwin", + "/jdk/openjdk-15+19-linux", + "/jdk/openjdk-15+19-windows", + "/jdk/openjdk-15+20-darwin", + "/jdk/openjdk-15+20-linux", + "/jdk/openjdk-15+20-windows", + "/jdk/openjdk-15+21-darwin", + "/jdk/openjdk-15+21-linux", + "/jdk/openjdk-15+21-windows", + "/jdk/openjdk-15+22-darwin", + "/jdk/openjdk-15+22-linux", + "/jdk/openjdk-15+22-windows", + "/jdk/openjdk-15+23-darwin", + "/jdk/openjdk-15+23-linux", + "/jdk/openjdk-15+23-windows", + "/jdk/openjdk-15+24-darwin", + "/jdk/openjdk-15+24-linux", + "/jdk/openjdk-15+24-windows", + "/jdk/openjdk-15+25-darwin", + "/jdk/openjdk-15+25-linux", + "/jdk/openjdk-15+25-windows", + "/jdk/openjdk-15+26-darwin", + "/jdk/openjdk-15+26-linux", + "/jdk/openjdk-15+26-windows", + "/jdk/openjdk-15+27-darwin", + "/jdk/openjdk-15+27-linux", + "/jdk/openjdk-15+27-windows", + "/jdk/openjdk-15+28-darwin", + "/jdk/openjdk-15+28-linux", + "/jdk/openjdk-15+28-windows", + "/jdk/openjdk-15+29-darwin", + "/jdk/openjdk-15+29-linux", + "/jdk/openjdk-15+29-windows", + "/jdk/openjdk-15+30-darwin", + "/jdk/openjdk-15+30-linux", + "/jdk/openjdk-15+30-windows", + "/jdk/openjdk-15+31-darwin", + "/jdk/openjdk-15+31-linux", + "/jdk/openjdk-15+31-windows", + "/jdk/openjdk-15+32-darwin", + "/jdk/openjdk-15+32-linux", + "/jdk/openjdk-15+32-windows", + "/jdk/openjdk-15+33-darwin", + "/jdk/openjdk-15+33-linux", + "/jdk/openjdk-15+33-windows", + "/jdk/openjdk-15+34-darwin", + "/jdk/openjdk-15+34-linux", + "/jdk/openjdk-15+34-windows", + "/jdk/openjdk-15+36-darwin", + "/jdk/openjdk-15+36-linux", + "/jdk/openjdk-15+36-windows", + "/jdk/openjdk-15+4-darwin", + "/jdk/openjdk-15+4-linux", + "/jdk/openjdk-15+4-windows", + "/jdk/openjdk-15+5-darwin", + "/jdk/openjdk-15+5-linux", + "/jdk/openjdk-15+5-windows", + "/jdk/openjdk-15+6-darwin", + "/jdk/openjdk-15+6-linux", + "/jdk/openjdk-15+6-windows", + "/jdk/openjdk-15+7-darwin", + "/jdk/openjdk-15+7-linux", + "/jdk/openjdk-15+7-windows", + "/jdk/openjdk-15+8-darwin", + "/jdk/openjdk-15+8-linux", + "/jdk/openjdk-15+8-windows", + "/jdk/openjdk-15+9-darwin", + "/jdk/openjdk-15+9-linux", + "/jdk/openjdk-15+9-windows", + "/jdk/openjdk-15-darwin", + "/jdk/openjdk-15-linux", + "/jdk/openjdk-15-windows", + "/jdk/openjdk-15.0.1+9-darwin", + "/jdk/openjdk-15.0.1+9-linux", + "/jdk/openjdk-15.0.1+9-windows", + "/jdk/openjdk-15.0.2+7-darwin", + "/jdk/openjdk-15.0.2+7-linux", + "/jdk/openjdk-15.0.2+7-linux-aarch64", + "/jdk/openjdk-15.0.2+7-windows", + "/jdk/openjdk-16+28-darwin", + "/jdk/openjdk-16+28-linux", + "/jdk/openjdk-16+28-windows", + "/jdk/openjdk-16+29-darwin", + "/jdk/openjdk-16+29-linux", + "/jdk/openjdk-16+29-windows", + "/jdk/openjdk-16+30-darwin", + "/jdk/openjdk-16+30-linux", + "/jdk/openjdk-16+30-windows", + "/jdk/openjdk-16+31-darwin", + "/jdk/openjdk-16+31-linux", + "/jdk/openjdk-16+31-windows", + "/jdk/openjdk-16+32-darwin", + "/jdk/openjdk-16+32-linux", + "/jdk/openjdk-16+32-windows", + "/jdk/openjdk-16+33-darwin", + "/jdk/openjdk-16+33-linux", + "/jdk/openjdk-16+33-windows", + "/jdk/openjdk-16+34-darwin", + "/jdk/openjdk-16+34-linux", + "/jdk/openjdk-16+34-windows", + "/jdk/openjdk-16+35-darwin", + "/jdk/openjdk-16+35-linux", + "/jdk/openjdk-16+35-windows", + "/jdk/openjdk-16+36-darwin", + 
"/jdk/openjdk-16+36-linux", + "/jdk/openjdk-16+36-linux-aarch64", + "/jdk/openjdk-16+36-windows", + "/jdk/openjdk-16.0.1+9-darwin", + "/jdk/openjdk-16.0.1+9-linux", + "/jdk/openjdk-16.0.1+9-linux-aarch64", + "/jdk/openjdk-16.0.1+9-windows", + "/jdk/openjdk-16.0.2+7-darwin", + "/jdk/openjdk-16.0.2+7-linux", + "/jdk/openjdk-16.0.2+7-linux-aarch64", + "/jdk/openjdk-16.0.2+7-windows", + "/jdk/openjdk-17+17-darwin", + "/jdk/openjdk-17+17-linux", + "/jdk/openjdk-17+17-linux-aarch64", + "/jdk/openjdk-17+17-windows", + "/jdk/openjdk-17+18-darwin", + "/jdk/openjdk-17+18-linux", + "/jdk/openjdk-17+18-linux-aarch64", + "/jdk/openjdk-17+18-windows", + "/jdk/openjdk-17+19-darwin", + "/jdk/openjdk-17+19-linux", + "/jdk/openjdk-17+19-linux-aarch64", + "/jdk/openjdk-17+19-windows", + "/jdk/openjdk-17+20-darwin", + "/jdk/openjdk-17+20-linux", + "/jdk/openjdk-17+20-linux-aarch64", + "/jdk/openjdk-17+20-windows", + "/jdk/openjdk-17+21-linux", + "/jdk/openjdk-17+21-linux-aarch64", + "/jdk/openjdk-17+21-windows", + "/jdk/openjdk-17+22-linux", + "/jdk/openjdk-17+22-linux-aarch64", + "/jdk/openjdk-17+22-windows", + "/jdk/openjdk-17+23-linux", + "/jdk/openjdk-17+23-linux-aarch64", + "/jdk/openjdk-17+23-windows", + "/jdk/openjdk-17+24-linux", + "/jdk/openjdk-17+24-linux-aarch64", + "/jdk/openjdk-17+24-windows", + "/jdk/openjdk-17+25-linux", + "/jdk/openjdk-17+25-linux-aarch64", + "/jdk/openjdk-17+25-windows", + "/jdk/openjdk-17+26-linux", + "/jdk/openjdk-17+26-linux-aarch64", + "/jdk/openjdk-17+26-windows", + "/jdk/openjdk-17+27-linux", + "/jdk/openjdk-17+27-linux-aarch64", + "/jdk/openjdk-17+27-windows", + "/jdk/openjdk-17+28-linux", + "/jdk/openjdk-17+28-linux-aarch64", + "/jdk/openjdk-17+28-windows", + "/jdk/openjdk-17+29-linux", + "/jdk/openjdk-17+29-linux-aarch64", + "/jdk/openjdk-17+29-windows", + "/jdk/openjdk-17+30-linux", + "/jdk/openjdk-17+30-linux-aarch64", + "/jdk/openjdk-17+30-windows", + "/jdk/openjdk-17+31-linux", + "/jdk/openjdk-17+31-linux-aarch64", + "/jdk/openjdk-17+31-windows", + "/jdk/openjdk-17+32-linux", + "/jdk/openjdk-17+32-linux-aarch64", + "/jdk/openjdk-17+32-windows", + "/jdk/openjdk-17+33-linux", + "/jdk/openjdk-17+33-linux-aarch64", + "/jdk/openjdk-17+33-windows", + "/jdk/openjdk-17+35-linux", + "/jdk/openjdk-17+35-linux-aarch64", + "/jdk/openjdk-17+35-windows", + "/jdk/openjdk-17.0.1+12-darwin", + "/jdk/openjdk-17.0.1+12-darwin-aarch64", + "/jdk/openjdk-17.0.1+12-linux", + "/jdk/openjdk-17.0.1+12-linux-aarch64", + "/jdk/openjdk-17.0.1+12-windows", + "/jdk/openjdk-17.0.2+8-darwin", + "/jdk/openjdk-17.0.2+8-darwin-aarch64", + "/jdk/openjdk-17.0.2+8-linux", + "/jdk/openjdk-17.0.2+8-linux-aarch64", + "/jdk/openjdk-17.0.2+8-windows", + "/jdk/openjdk-18+22-darwin", + "/jdk/openjdk-18+22-darwin-aarch64", + "/jdk/openjdk-18+22-linux", + "/jdk/openjdk-18+22-linux-aarch64", + "/jdk/openjdk-18+22-windows", + "/jdk/openjdk-18+23-darwin", + "/jdk/openjdk-18+23-darwin-aarch64", + "/jdk/openjdk-18+23-linux", + "/jdk/openjdk-18+23-linux-aarch64", + "/jdk/openjdk-18+23-windows", + "/jdk/openjdk-18+24-darwin", + "/jdk/openjdk-18+24-darwin-aarch64", + "/jdk/openjdk-18+24-linux", + "/jdk/openjdk-18+24-linux-aarch64", + "/jdk/openjdk-18+24-windows", + "/jdk/openjdk-18+25-darwin", + "/jdk/openjdk-18+25-darwin-aarch64", + "/jdk/openjdk-18+25-linux", + "/jdk/openjdk-18+25-linux-aarch64", + "/jdk/openjdk-18+25-windows", + "/jdk/openjdk-18+26-darwin", + "/jdk/openjdk-18+26-darwin-aarch64", + "/jdk/openjdk-18+26-linux", + "/jdk/openjdk-18+26-linux-aarch64", + "/jdk/openjdk-18+26-windows", + 
"/jdk/openjdk-18+27-darwin", + "/jdk/openjdk-18+27-darwin-aarch64", + "/jdk/openjdk-18+27-linux", + "/jdk/openjdk-18+27-linux-aarch64", + "/jdk/openjdk-18+27-windows", + "/jdk/openjdk-18+28-darwin", + "/jdk/openjdk-18+28-darwin-aarch64", + "/jdk/openjdk-18+28-linux", + "/jdk/openjdk-18+28-linux-aarch64", + "/jdk/openjdk-18+28-windows", + "/jdk/openjdk-18+29-darwin", + "/jdk/openjdk-18+29-darwin-aarch64", + "/jdk/openjdk-18+29-linux", + "/jdk/openjdk-18+29-linux-aarch64", + "/jdk/openjdk-18+29-windows", + "/jdk/openjdk-18+30-darwin", + "/jdk/openjdk-18+30-darwin-aarch64", + "/jdk/openjdk-18+30-linux", + "/jdk/openjdk-18+30-linux-aarch64", + "/jdk/openjdk-18+30-windows", + "/jdk/openjdk-18+31-darwin", + "/jdk/openjdk-18+31-darwin-aarch64", + "/jdk/openjdk-18+31-linux", + "/jdk/openjdk-18+31-linux-aarch64", + "/jdk/openjdk-18+31-windows", + "/jdk/openjdk-18+33-darwin", + "/jdk/openjdk-18+33-darwin-aarch64", + "/jdk/openjdk-18+33-linux", + "/jdk/openjdk-18+33-linux-aarch64", + "/jdk/openjdk-18+33-windows", + "/jdk/openjdk-18+34-darwin", + "/jdk/openjdk-18+34-darwin-aarch64", + "/jdk/openjdk-18+34-linux", + "/jdk/openjdk-18+34-linux-aarch64", + "/jdk/openjdk-18+34-windows", + "/jdk/openjdk-18+35-darwin", + "/jdk/openjdk-18+35-darwin-aarch64", + "/jdk/openjdk-18+35-linux", + "/jdk/openjdk-18+35-linux-aarch64", + "/jdk/openjdk-18+35-windows", + "/jdk/openjdk-18+36-darwin", + "/jdk/openjdk-18+36-darwin-aarch64", + "/jdk/openjdk-18+36-linux", + "/jdk/openjdk-18+36-linux-aarch64", + "/jdk/openjdk-18+36-windows", + "/jdk/openjdk-18.0.1+10-darwin", + "/jdk/openjdk-18.0.1+10-darwin-aarch64", + "/jdk/openjdk-18.0.1+10-linux", + "/jdk/openjdk-18.0.1+10-linux-aarch64", + "/jdk/openjdk-18.0.1+10-windows", + "/jdk/openjdk-18.0.1.1+2-darwin", + "/jdk/openjdk-18.0.1.1+2-darwin-aarch64", + "/jdk/openjdk-18.0.1.1+2-linux", + "/jdk/openjdk-18.0.1.1+2-linux-aarch64", + "/jdk/openjdk-18.0.1.1+2-windows", + "/jdk/openjdk-18.0.2+9-darwin", + "/jdk/openjdk-18.0.2+9-darwin-aarch64", + "/jdk/openjdk-18.0.2+9-linux", + "/jdk/openjdk-18.0.2+9-linux-aarch64", + "/jdk/openjdk-18.0.2+9-windows", + "/jdk/openjdk-18.0.2.1+1-darwin", + "/jdk/openjdk-18.0.2.1+1-darwin-aarch64", + "/jdk/openjdk-18.0.2.1+1-linux", + "/jdk/openjdk-18.0.2.1+1-linux-aarch64", + "/jdk/openjdk-18.0.2.1+1-windows", + "/jdk/openjdk-19+14-darwin", + "/jdk/openjdk-19+14-darwin-aarch64", + "/jdk/openjdk-19+14-linux", + "/jdk/openjdk-19+14-linux-aarch64", + "/jdk/openjdk-19+14-windows", + "/jdk/openjdk-19+15-darwin", + "/jdk/openjdk-19+15-darwin-aarch64", + "/jdk/openjdk-19+15-linux", + "/jdk/openjdk-19+15-linux-aarch64", + "/jdk/openjdk-19+15-windows", + "/jdk/openjdk-19+16-darwin", + "/jdk/openjdk-19+16-darwin-aarch64", + "/jdk/openjdk-19+16-linux", + "/jdk/openjdk-19+16-linux-aarch64", + "/jdk/openjdk-19+16-windows", + "/jdk/openjdk-19+17-darwin", + "/jdk/openjdk-19+17-darwin-aarch64", + "/jdk/openjdk-19+17-linux", + "/jdk/openjdk-19+17-linux-aarch64", + "/jdk/openjdk-19+17-windows", + "/jdk/openjdk-19+18-darwin", + "/jdk/openjdk-19+18-darwin-aarch64", + "/jdk/openjdk-19+18-linux", + "/jdk/openjdk-19+18-linux-aarch64", + "/jdk/openjdk-19+18-windows", + "/jdk/openjdk-19+19-darwin", + "/jdk/openjdk-19+19-darwin-aarch64", + "/jdk/openjdk-19+19-linux", + "/jdk/openjdk-19+19-linux-aarch64", + "/jdk/openjdk-19+19-windows", + "/jdk/openjdk-19+20-darwin", + "/jdk/openjdk-19+20-darwin-aarch64", + "/jdk/openjdk-19+20-linux", + "/jdk/openjdk-19+20-linux-aarch64", + "/jdk/openjdk-19+20-windows", + "/jdk/openjdk-19+21-darwin", + "/jdk/openjdk-19+21-darwin-aarch64", + 
"/jdk/openjdk-19+21-linux", + "/jdk/openjdk-19+21-linux-aarch64", + "/jdk/openjdk-19+21-windows", + "/jdk/openjdk-19+22-darwin", + "/jdk/openjdk-19+22-darwin-aarch64", + "/jdk/openjdk-19+22-linux", + "/jdk/openjdk-19+22-linux-aarch64", + "/jdk/openjdk-19+22-windows", + "/jdk/openjdk-19+23-darwin", + "/jdk/openjdk-19+23-darwin-aarch64", + "/jdk/openjdk-19+23-linux", + "/jdk/openjdk-19+23-linux-aarch64", + "/jdk/openjdk-19+23-windows", + "/jdk/openjdk-19+24-darwin", + "/jdk/openjdk-19+24-darwin-aarch64", + "/jdk/openjdk-19+24-linux", + "/jdk/openjdk-19+24-linux-aarch64", + "/jdk/openjdk-19+24-windows", + "/jdk/openjdk-19+25-darwin", + "/jdk/openjdk-19+25-darwin-aarch64", + "/jdk/openjdk-19+25-linux", + "/jdk/openjdk-19+25-linux-aarch64", + "/jdk/openjdk-19+25-windows", + "/jdk/openjdk-19+26-darwin", + "/jdk/openjdk-19+26-darwin-aarch64", + "/jdk/openjdk-19+26-linux", + "/jdk/openjdk-19+26-linux-aarch64", + "/jdk/openjdk-19+26-windows", + "/jdk/openjdk-19+27-darwin", + "/jdk/openjdk-19+27-darwin-aarch64", + "/jdk/openjdk-19+27-linux", + "/jdk/openjdk-19+27-linux-aarch64", + "/jdk/openjdk-19+27-windows", + "/jdk/openjdk-19+28-darwin", + "/jdk/openjdk-19+28-darwin-aarch64", + "/jdk/openjdk-19+28-linux", + "/jdk/openjdk-19+28-linux-aarch64", + "/jdk/openjdk-19+28-windows", + "/jdk/openjdk-19+29-darwin", + "/jdk/openjdk-19+29-darwin-aarch64", + "/jdk/openjdk-19+29-linux", + "/jdk/openjdk-19+29-linux-aarch64", + "/jdk/openjdk-19+29-windows", + "/jdk/openjdk-19+30-darwin", + "/jdk/openjdk-19+30-darwin-aarch64", + "/jdk/openjdk-19+30-linux", + "/jdk/openjdk-19+30-linux-aarch64", + "/jdk/openjdk-19+30-windows", + "/jdk/openjdk-19+31-darwin", + "/jdk/openjdk-19+31-darwin-aarch64", + "/jdk/openjdk-19+31-linux", + "/jdk/openjdk-19+31-linux-aarch64", + "/jdk/openjdk-19+31-windows", + "/jdk/openjdk-19+32-darwin", + "/jdk/openjdk-19+32-darwin-aarch64", + "/jdk/openjdk-19+32-linux", + "/jdk/openjdk-19+32-linux-aarch64", + "/jdk/openjdk-19+32-windows", + "/jdk/openjdk-19+33-darwin", + "/jdk/openjdk-19+33-darwin-aarch64", + "/jdk/openjdk-19+33-linux", + "/jdk/openjdk-19+33-linux-aarch64", + "/jdk/openjdk-19+33-windows", + "/jdk/openjdk-19+34-darwin", + "/jdk/openjdk-19+34-darwin-aarch64", + "/jdk/openjdk-19+34-linux", + "/jdk/openjdk-19+34-linux-aarch64", + "/jdk/openjdk-19+34-windows", + "/jdk/openjdk-19+35-darwin", + "/jdk/openjdk-19+35-darwin-aarch64", + "/jdk/openjdk-19+35-linux", + "/jdk/openjdk-19+35-linux-aarch64", + "/jdk/openjdk-19+35-windows", + "/jdk/openjdk-19+36-darwin", + "/jdk/openjdk-19+36-darwin-aarch64", + "/jdk/openjdk-19+36-linux", + "/jdk/openjdk-19+36-linux-aarch64", + "/jdk/openjdk-19+36-windows", + "/jdk/openjdk-19.0.1+10-darwin", + "/jdk/openjdk-19.0.1+10-darwin-aarch64", + "/jdk/openjdk-19.0.1+10-linux", + "/jdk/openjdk-19.0.1+10-linux-aarch64", + "/jdk/openjdk-19.0.1+10-windows", + "/jdk/openjdk-19.0.2+7-darwin", + "/jdk/openjdk-19.0.2+7-darwin-aarch64", + "/jdk/openjdk-19.0.2+7-linux", + "/jdk/openjdk-19.0.2+7-linux-aarch64", + "/jdk/openjdk-19.0.2+7-windows", + "/jdk/openjdk-20+33-darwin", + "/jdk/openjdk-20+33-darwin-aarch64", + "/jdk/openjdk-20+33-linux", + "/jdk/openjdk-20+33-linux-aarch64", + "/jdk/openjdk-20+33-windows", + "/jdk/openjdk-20+34-darwin", + "/jdk/openjdk-20+34-darwin-aarch64", + "/jdk/openjdk-20+34-linux", + "/jdk/openjdk-20+34-linux-aarch64", + "/jdk/openjdk-20+34-windows", + "/jdk/openjdk-20+35-darwin", + "/jdk/openjdk-20+35-darwin-aarch64", + "/jdk/openjdk-20+35-linux", + "/jdk/openjdk-20+35-linux-aarch64", + "/jdk/openjdk-20+35-windows", + 
"/jdk/openjdk-20+36-darwin", + "/jdk/openjdk-20+36-darwin-aarch64", + "/jdk/openjdk-20+36-linux", + "/jdk/openjdk-20+36-linux-aarch64", + "/jdk/openjdk-20+36-windows", + "/jdk/openjdk-20.0.1+9-darwin", + "/jdk/openjdk-20.0.1+9-darwin-aarch64", + "/jdk/openjdk-20.0.1+9-linux", + "/jdk/openjdk-20.0.1+9-linux-aarch64", + "/jdk/openjdk-20.0.1+9-windows", + "/jdk/openjdk-20.0.2+9-darwin", + "/jdk/openjdk-20.0.2+9-darwin-aarch64", + "/jdk/openjdk-20.0.2+9-linux", + "/jdk/openjdk-20.0.2+9-linux-aarch64", + "/jdk/openjdk-20.0.2+9-windows", + "/jdk/openjdk-21+25-darwin", + "/jdk/openjdk-21+25-darwin-aarch64", + "/jdk/openjdk-21+25-linux", + "/jdk/openjdk-21+25-linux-aarch64", + "/jdk/openjdk-21+25-windows", + "/jdk/openjdk-21+26-darwin", + "/jdk/openjdk-21+26-darwin-aarch64", + "/jdk/openjdk-21+26-linux", + "/jdk/openjdk-21+26-linux-aarch64", + "/jdk/openjdk-21+26-windows", + "/jdk/openjdk-21+27-darwin", + "/jdk/openjdk-21+27-darwin-aarch64", + "/jdk/openjdk-21+27-linux", + "/jdk/openjdk-21+27-linux-aarch64", + "/jdk/openjdk-21+27-windows", + "/jdk/openjdk-21+28-darwin", + "/jdk/openjdk-21+28-darwin-aarch64", + "/jdk/openjdk-21+28-linux", + "/jdk/openjdk-21+28-linux-aarch64", + "/jdk/openjdk-21+28-windows", + "/jdk/openjdk-21+29-darwin", + "/jdk/openjdk-21+29-darwin-aarch64", + "/jdk/openjdk-21+29-linux", + "/jdk/openjdk-21+29-linux-aarch64", + "/jdk/openjdk-21+29-windows", + "/jdk/openjdk-21+30-darwin", + "/jdk/openjdk-21+30-darwin-aarch64", + "/jdk/openjdk-21+30-linux", + "/jdk/openjdk-21+30-linux-aarch64", + "/jdk/openjdk-21+30-windows", + "/jdk/openjdk-21+31-darwin", + "/jdk/openjdk-21+31-darwin-aarch64", + "/jdk/openjdk-21+31-linux", + "/jdk/openjdk-21+31-linux-aarch64", + "/jdk/openjdk-21+31-windows", + "/jdk/openjdk-21+32-darwin", + "/jdk/openjdk-21+32-darwin-aarch64", + "/jdk/openjdk-21+32-linux", + "/jdk/openjdk-21+32-linux-aarch64", + "/jdk/openjdk-21+32-windows", + "/jdk/openjdk-21+33-darwin", + "/jdk/openjdk-21+33-darwin-aarch64", + "/jdk/openjdk-21+33-linux", + "/jdk/openjdk-21+33-linux-aarch64", + "/jdk/openjdk-21+33-windows", + "/jdk/openjdk-21+34-darwin", + "/jdk/openjdk-21+34-darwin-aarch64", + "/jdk/openjdk-21+34-linux", + "/jdk/openjdk-21+34-linux-aarch64", + "/jdk/openjdk-21+34-windows", + "/jdk/openjdk-21+35-darwin", + "/jdk/openjdk-21+35-darwin-aarch64", + "/jdk/openjdk-21+35-linux", + "/jdk/openjdk-21+35-linux-aarch64", + "/jdk/openjdk-21+35-windows", + "/jdk/openjdk-21.0.1+12-darwin", + "/jdk/openjdk-21.0.1+12-darwin-aarch64", + "/jdk/openjdk-21.0.1+12-linux", + "/jdk/openjdk-21.0.1+12-linux-aarch64", + "/jdk/openjdk-21.0.1+12-windows", + "/jdk/openjdk-21.0.2+13-darwin", + "/jdk/openjdk-21.0.2+13-darwin-aarch64", + "/jdk/openjdk-21.0.2+13-linux", + "/jdk/openjdk-21.0.2+13-linux-aarch64", + "/jdk/openjdk-21.0.2+13-windows", + "/jdk/openjdk-22+28-darwin", + "/jdk/openjdk-22+28-darwin-aarch64", + "/jdk/openjdk-22+28-linux", + "/jdk/openjdk-22+28-linux-aarch64", + "/jdk/openjdk-22+28-windows", + "/jdk/openjdk-22+29-darwin", + "/jdk/openjdk-22+29-darwin-aarch64", + "/jdk/openjdk-22+29-linux", + "/jdk/openjdk-22+29-linux-aarch64", + "/jdk/openjdk-22+29-windows", + "/jdk/openjdk-22+30-darwin", + "/jdk/openjdk-22+30-darwin-aarch64", + "/jdk/openjdk-22+30-linux", + "/jdk/openjdk-22+30-linux-aarch64", + "/jdk/openjdk-22+30-windows", + "/jdk/openjdk-22+31-darwin", + "/jdk/openjdk-22+31-darwin-aarch64", + "/jdk/openjdk-22+31-linux", + "/jdk/openjdk-22+31-linux-aarch64", + "/jdk/openjdk-22+31-windows", + "/jdk/openjdk-22+32-darwin", + "/jdk/openjdk-22+32-darwin-aarch64", + 
"/jdk/openjdk-22+32-linux", + "/jdk/openjdk-22+32-linux-aarch64", + "/jdk/openjdk-22+32-windows", + "/jdk/openjdk-22+33-darwin", + "/jdk/openjdk-22+33-darwin-aarch64", + "/jdk/openjdk-22+33-linux", + "/jdk/openjdk-22+33-linux-aarch64", + "/jdk/openjdk-22+33-windows", + "/jdk/openjdk-22+34-darwin", + "/jdk/openjdk-22+34-darwin-aarch64", + "/jdk/openjdk-22+34-linux", + "/jdk/openjdk-22+34-linux-aarch64", + "/jdk/openjdk-22+34-windows", + "/jdk/openjdk-22+35-darwin", + "/jdk/openjdk-22+35-darwin-aarch64", + "/jdk/openjdk-22+35-linux", + "/jdk/openjdk-22+35-linux-aarch64", + "/jdk/openjdk-22+35-windows", + "/jdk/openjdk-22+36-darwin", + "/jdk/openjdk-22+36-darwin-aarch64", + "/jdk/openjdk-22+36-linux", + "/jdk/openjdk-22+36-linux-aarch64", + "/jdk/openjdk-22+36-windows", + "/jdk/openjdk-22.0.1+8-darwin", + "/jdk/openjdk-22.0.1+8-darwin-aarch64", + "/jdk/openjdk-22.0.1+8-linux", + "/jdk/openjdk-22.0.1+8-linux-aarch64", + "/jdk/openjdk-22.0.1+8-windows", + "/jdk/openjdk-23+22-darwin", + "/jdk/openjdk-23+22-darwin-aarch64", + "/jdk/openjdk-23+22-linux", + "/jdk/openjdk-23+22-linux-aarch64", + "/jdk/openjdk-23+22-windows", + "/jdk/openjdk-23+23-darwin", + "/jdk/openjdk-23+23-darwin-aarch64", + "/jdk/openjdk-23+23-linux", + "/jdk/openjdk-23+23-linux-aarch64", + "/jdk/openjdk-23+23-windows", + "/jdk/openjdk-23+24-darwin", + "/jdk/openjdk-23+24-darwin-aarch64", + "/jdk/openjdk-23+24-linux", + "/jdk/openjdk-23+24-linux-aarch64", + "/jdk/openjdk-23+24-windows", + "/jdk/openjdk-23+25-darwin", + "/jdk/openjdk-23+25-darwin-aarch64", + "/jdk/openjdk-23+25-linux", + "/jdk/openjdk-23+25-linux-aarch64", + "/jdk/openjdk-23+25-windows", + "/jdk/openjdk-23+26-darwin", + "/jdk/openjdk-23+26-darwin-aarch64", + "/jdk/openjdk-23+26-linux", + "/jdk/openjdk-23+26-linux-aarch64", + "/jdk/openjdk-23+26-windows", + "/jdk/openjdk-23+27-darwin", + "/jdk/openjdk-23+27-darwin-aarch64", + "/jdk/openjdk-23+27-linux", + "/jdk/openjdk-23+27-linux-aarch64", + "/jdk/openjdk-23+27-windows", + "/jdk/openjdk-23+28-darwin", + "/jdk/openjdk-23+28-darwin-aarch64", + "/jdk/openjdk-23+28-linux", + "/jdk/openjdk-23+28-linux-aarch64", + "/jdk/openjdk-23+28-windows", + "/jdk/openjdk-23+29-darwin", + "/jdk/openjdk-23+29-darwin-aarch64", + "/jdk/openjdk-23+29-linux", + "/jdk/openjdk-23+29-linux-aarch64", + "/jdk/openjdk-23+29-windows", + "/jdk/openjdk-23+30-darwin", + "/jdk/openjdk-23+30-darwin-aarch64", + "/jdk/openjdk-23+30-linux", + "/jdk/openjdk-23+30-linux-aarch64", + "/jdk/openjdk-23+30-windows", + "/jdk/openjdk-9.0.4-darwin", + "/jdk/openjdk-9.0.4-linux", + "/jdk/openjdk-9.0.4-windows", + "/jdk/oracle-10+43-darwin", + "/jdk/oracle-10+43-linux", + "/jdk/oracle-10+43-windows", + "/jdk/oracle-10+46-darwin", + "/jdk/oracle-10+46-linux", + "/jdk/oracle-10+46-windows", + "/jdk/oracle-11+11-darwin", + "/jdk/oracle-11+11-linux", + "/jdk/oracle-11+11-windows", + "/jdk/oracle-11+12-darwin", + "/jdk/oracle-11+12-linux", + "/jdk/oracle-11+12-windows", + "/jdk/oracle-11+13-darwin", + "/jdk/oracle-11+13-linux", + "/jdk/oracle-11+13-windows", + "/jdk/oracle-11+14-darwin", + "/jdk/oracle-11+14-linux", + "/jdk/oracle-11+14-windows", + "/jdk/oracle-11+15-darwin", + "/jdk/oracle-11+15-linux", + "/jdk/oracle-11+15-windows", + "/jdk/oracle-11+16-darwin", + "/jdk/oracle-11+16-linux", + "/jdk/oracle-11+16-windows", + "/jdk/oracle-11+17-darwin", + "/jdk/oracle-11+17-linux", + "/jdk/oracle-11+17-windows", + "/jdk/oracle-11+18-darwin", + "/jdk/oracle-11+18-linux", + "/jdk/oracle-11+18-windows", + "/jdk/oracle-11+19-darwin", + "/jdk/oracle-11+19-linux", + 
"/jdk/oracle-11+19-windows", + "/jdk/oracle-11+20-darwin", + "/jdk/oracle-11+20-linux", + "/jdk/oracle-11+20-windows", + "/jdk/oracle-11+21-darwin", + "/jdk/oracle-11+21-linux", + "/jdk/oracle-11+21-windows", + "/jdk/oracle-11+22-darwin", + "/jdk/oracle-11+22-linux", + "/jdk/oracle-11+22-windows", + "/jdk/oracle-11+23-darwin", + "/jdk/oracle-11+23-linux", + "/jdk/oracle-11+23-windows", + "/jdk/oracle-11+24-darwin", + "/jdk/oracle-11+24-linux", + "/jdk/oracle-11+24-windows", + "/jdk/oracle-11+25-darwin", + "/jdk/oracle-11+25-linux", + "/jdk/oracle-11+25-windows", + "/jdk/oracle-11+26-darwin", + "/jdk/oracle-11+26-linux", + "/jdk/oracle-11+26-windows", + "/jdk/oracle-11+27-darwin", + "/jdk/oracle-11+27-linux", + "/jdk/oracle-11+27-windows", + "/jdk/oracle-11+28-darwin", + "/jdk/oracle-11+28-linux", + "/jdk/oracle-11+28-windows", + "/jdk/oracle-11+5-darwin", + "/jdk/oracle-11+5-linux", + "/jdk/oracle-11+5-windows", + "/jdk/oracle-11.0.11-darwin", + "/jdk/oracle-11.0.11-linux", + "/jdk/oracle-11.0.11-linux-aarch64", + "/jdk/oracle-11.0.11-windows", + "/jdk/oracle-11.0.12+8-darwin", + "/jdk/oracle-11.0.12+8-linux", + "/jdk/oracle-11.0.12+8-linux-aarch64", + "/jdk/oracle-11.0.12+8-windows", + "/jdk/oracle-11.0.2+7-darwin", + "/jdk/oracle-11.0.2+7-linux", + "/jdk/oracle-11.0.2+7-windows", + "/jdk/oracle-11.0.2+9-darwin", + "/jdk/oracle-11.0.2+9-linux", + "/jdk/oracle-11.0.2+9-windows", + "/jdk/oracle-11.0.3+12-darwin", + "/jdk/oracle-11.0.3+12-linux", + "/jdk/oracle-11.0.3+12-windows", + "/jdk/oracle-11.0.4+10-darwin", + "/jdk/oracle-11.0.4+10-linux", + "/jdk/oracle-11.0.4+10-windows", + "/jdk/oracle-11.0.5+10-darwin", + "/jdk/oracle-11.0.5+10-linux", + "/jdk/oracle-11.0.5+10-windows", + "/jdk/oracle-11.0.6+8-darwin", + "/jdk/oracle-11.0.6+8-linux", + "/jdk/oracle-11.0.6+8-windows", + "/jdk/oracle-12+33-darwin", + "/jdk/oracle-12+33-linux", + "/jdk/oracle-12+33-windows", + "/jdk/oracle-12.0.1+12-darwin", + "/jdk/oracle-12.0.1+12-linux", + "/jdk/oracle-12.0.1+12-windows", + "/jdk/oracle-12.0.2+10-darwin", + "/jdk/oracle-12.0.2+10-linux", + "/jdk/oracle-12.0.2+10-windows", + "/jdk/oracle-13+33-darwin", + "/jdk/oracle-13+33-linux", + "/jdk/oracle-13+33-windows", + "/jdk/oracle-13.0.1+9-darwin", + "/jdk/oracle-13.0.1+9-linux", + "/jdk/oracle-13.0.1+9-windows", + "/jdk/oracle-13.0.2+8-darwin", + "/jdk/oracle-13.0.2+8-linux", + "/jdk/oracle-13.0.2+8-windows", + "/jdk/oracle-16.0.1+9-darwin", + "/jdk/oracle-16.0.1+9-linux", + "/jdk/oracle-16.0.1+9-linux-aarch64", + "/jdk/oracle-16.0.1+9-windows", + "/jdk/oracle-16.0.2+7-darwin", + "/jdk/oracle-16.0.2+7-linux", + "/jdk/oracle-16.0.2+7-linux-aarch64", + "/jdk/oracle-16.0.2+7-windows", + "/jdk/oracle-7u80-darwin", + "/jdk/oracle-7u80-linux", + "/jdk/oracle-7u80-windows", + "/jdk/oracle-8u161-darwin", + "/jdk/oracle-8u161-linux", + "/jdk/oracle-8u161-windows", + "/jdk/oracle-8u162-darwin", + "/jdk/oracle-8u162-linux", + "/jdk/oracle-8u162-windows", + "/jdk/oracle-8u171-darwin", + "/jdk/oracle-8u171-linux", + "/jdk/oracle-8u171-windows", + "/jdk/oracle-8u172-darwin", + "/jdk/oracle-8u172-linux", + "/jdk/oracle-8u172-windows", + "/jdk/oracle-8u181-darwin", + "/jdk/oracle-8u181-linux", + "/jdk/oracle-8u181-windows", + "/jdk/oracle-8u191-darwin", + "/jdk/oracle-8u191-linux", + "/jdk/oracle-8u191-windows", + "/jdk/oracle-8u192-darwin", + "/jdk/oracle-8u192-linux", + "/jdk/oracle-8u192-windows", + "/jdk/oracle-8u201-darwin", + "/jdk/oracle-8u201-linux", + "/jdk/oracle-8u201-windows", + "/jdk/oracle-8u202-darwin", + "/jdk/oracle-8u202-linux", + 
"/jdk/oracle-8u202-windows", + "/jdk/oracle-8u211-darwin", + "/jdk/oracle-8u211-linux", + "/jdk/oracle-8u211-windows", + "/jdk/oracle-8u212-darwin", + "/jdk/oracle-8u212-linux", + "/jdk/oracle-8u212-windows", + "/jdk/oracle-8u221-darwin", + "/jdk/oracle-8u221-linux", + "/jdk/oracle-8u221-windows", + "/jdk/oracle-8u231-darwin", + "/jdk/oracle-8u231-linux", + "/jdk/oracle-8u231-windows", + "/jdk/oracle-8u241-darwin", + "/jdk/oracle-8u241-linux", + "/jdk/oracle-8u241-windows", + "/jdk/oracle-8u271-darwin", + "/jdk/oracle-8u271-linux", + "/jdk/oracle-8u271-linux-aarch64", + "/jdk/oracle-8u271-linux-x86_32", + "/jdk/oracle-8u271-windows", + "/jdk/oracle-8u271-windows-x86_32", + "/jdk/oracle-8u281-darwin", + "/jdk/oracle-8u281-linux", + "/jdk/oracle-8u281-linux-aarch64", + "/jdk/oracle-8u281-linux-x86_32", + "/jdk/oracle-8u281-windows", + "/jdk/oracle-8u281-windows-x86_32", + "/jdk/oracle-8u291-darwin", + "/jdk/oracle-8u291-linux", + "/jdk/oracle-8u291-linux-aarch64", + "/jdk/oracle-8u291-linux-x86_32", + "/jdk/oracle-8u291-windows", + "/jdk/oracle-8u291-windows-x86_32", + "/jdk/oracle-8u301-darwin", + "/jdk/oracle-8u301-linux", + "/jdk/oracle-8u301-linux-aarch64", + "/jdk/oracle-8u301-linux-x86_32", + "/jdk/oracle-8u301-windows", + "/jdk/oracle-8u301-windows-x86_32", + "/jdk/oracle-9.0.4+11-darwin", + "/jdk/oracle-9.0.4+11-linux", + "/jdk/oracle-9.0.4+11-windows", + "/jdk/sapjvm-7.1.073-linux", + "/jdk/sapjvm-8.1.065-linux", + "/jdk/sapjvm-8.1.067-linux", + "/jdk/sapjvm-8.1.071-linux", + "/jdk/zulu-1.8.0.131-linux-aarch64", + "/jdk/zulu-1.8.0.144-linux-aarch64", + "/jdk/zulu-1.8.0.152-linux-aarch64", + "/jdk/zulu-1.8.0.162-linux-aarch64", + "/jdk/zulu-1.8.0.172-linux-aarch64", + "/jdk/zulu-1.8.0.181-linux-aarch64", + "/jdk/zulu-1.8.0.192-linux-aarch64", + "/jdk/zulu-1.8.0.202-linux-aarch64", + "/jdk/zulu-1.8.0.212-linux-aarch64", + "/jdk/zulu-10.0.0-darwin", + "/jdk/zulu-10.0.0-linux", + "/jdk/zulu-10.0.0-windows", + "/jdk/zulu-10.0.1-darwin", + "/jdk/zulu-10.0.1-linux", + "/jdk/zulu-10.0.1-windows", + "/jdk/zulu-10.0.2-darwin", + "/jdk/zulu-10.0.2-linux", + "/jdk/zulu-10.0.2-windows", + "/jdk/zulu-11.0.0-linux-aarch64", + "/jdk/zulu-11.0.1-darwin", + "/jdk/zulu-11.0.1-linux", + "/jdk/zulu-11.0.1-windows", + "/jdk/zulu-11.0.10-darwin", + "/jdk/zulu-11.0.10-darwin-aarch64", + "/jdk/zulu-11.0.10-linux", + "/jdk/zulu-11.0.10-windows", + "/jdk/zulu-11.0.11-darwin", + "/jdk/zulu-11.0.11-darwin-aarch64", + "/jdk/zulu-11.0.11-linux", + "/jdk/zulu-11.0.11-windows", + "/jdk/zulu-11.0.12-darwin", + "/jdk/zulu-11.0.12-darwin-aarch64", + "/jdk/zulu-11.0.12-linux", + "/jdk/zulu-11.0.12-windows", + "/jdk/zulu-11.0.13-darwin", + "/jdk/zulu-11.0.13-darwin-aarch64", + "/jdk/zulu-11.0.13-linux", + "/jdk/zulu-11.0.13-windows", + "/jdk/zulu-11.0.14-darwin", + "/jdk/zulu-11.0.14-darwin-aarch64", + "/jdk/zulu-11.0.14-linux", + "/jdk/zulu-11.0.14-windows", + "/jdk/zulu-11.0.14.1-darwin", + "/jdk/zulu-11.0.14.1-darwin-aarch64", + "/jdk/zulu-11.0.14.1-linux", + "/jdk/zulu-11.0.14.1-windows", + "/jdk/zulu-11.0.15-darwin", + "/jdk/zulu-11.0.15-darwin-aarch64", + "/jdk/zulu-11.0.15-linux", + "/jdk/zulu-11.0.15-windows", + "/jdk/zulu-11.0.16-darwin", + "/jdk/zulu-11.0.16-darwin-aarch64", + "/jdk/zulu-11.0.16-linux", + "/jdk/zulu-11.0.16-windows", + "/jdk/zulu-11.0.16.1-darwin", + "/jdk/zulu-11.0.16.1-darwin-aarch64", + "/jdk/zulu-11.0.16.1-linux", + "/jdk/zulu-11.0.16.1-windows", + "/jdk/zulu-11.0.17-darwin", + "/jdk/zulu-11.0.17-darwin-aarch64", + "/jdk/zulu-11.0.17-linux", + "/jdk/zulu-11.0.17-windows", + 
"/jdk/zulu-11.0.18-darwin", + "/jdk/zulu-11.0.18-darwin-aarch64", + "/jdk/zulu-11.0.18-linux", + "/jdk/zulu-11.0.18-windows", + "/jdk/zulu-11.0.19-darwin", + "/jdk/zulu-11.0.19-darwin-aarch64", + "/jdk/zulu-11.0.19-linux", + "/jdk/zulu-11.0.19-windows", + "/jdk/zulu-11.0.2-darwin", + "/jdk/zulu-11.0.2-linux", + "/jdk/zulu-11.0.2-windows", + "/jdk/zulu-11.0.20-darwin", + "/jdk/zulu-11.0.20-darwin-aarch64", + "/jdk/zulu-11.0.20-linux", + "/jdk/zulu-11.0.20-linux-aarch64", + "/jdk/zulu-11.0.20-windows", + "/jdk/zulu-11.0.20.1-darwin", + "/jdk/zulu-11.0.20.1-darwin-aarch64", + "/jdk/zulu-11.0.20.1-linux", + "/jdk/zulu-11.0.20.1-linux-aarch64", + "/jdk/zulu-11.0.20.1-windows", + "/jdk/zulu-11.0.21-darwin", + "/jdk/zulu-11.0.21-darwin-aarch64", + "/jdk/zulu-11.0.21-linux", + "/jdk/zulu-11.0.21-linux-aarch64", + "/jdk/zulu-11.0.21-windows", + "/jdk/zulu-11.0.22-darwin", + "/jdk/zulu-11.0.22-darwin-aarch64", + "/jdk/zulu-11.0.22-linux", + "/jdk/zulu-11.0.22-linux-aarch64", + "/jdk/zulu-11.0.22-windows", + "/jdk/zulu-11.0.23-darwin", + "/jdk/zulu-11.0.23-darwin-aarch64", + "/jdk/zulu-11.0.23-linux", + "/jdk/zulu-11.0.23-linux-aarch64", + "/jdk/zulu-11.0.23-windows", + "/jdk/zulu-11.0.3-darwin", + "/jdk/zulu-11.0.3-linux", + "/jdk/zulu-11.0.3-linux-aarch64", + "/jdk/zulu-11.0.3-windows", + "/jdk/zulu-11.0.4-darwin", + "/jdk/zulu-11.0.4-linux", + "/jdk/zulu-11.0.4-windows", + "/jdk/zulu-11.0.5-darwin", + "/jdk/zulu-11.0.5-linux", + "/jdk/zulu-11.0.5-linux-aarch64", + "/jdk/zulu-11.0.5-windows", + "/jdk/zulu-11.0.6-darwin", + "/jdk/zulu-11.0.6-linux", + "/jdk/zulu-11.0.6-linux-aarch64", + "/jdk/zulu-11.0.6-windows", + "/jdk/zulu-11.0.7-darwin", + "/jdk/zulu-11.0.7-linux", + "/jdk/zulu-11.0.7-linux-aarch64", + "/jdk/zulu-11.0.7-windows", + "/jdk/zulu-11.0.8-darwin", + "/jdk/zulu-11.0.8-linux", + "/jdk/zulu-11.0.8-linux-aarch64", + "/jdk/zulu-11.0.8-windows", + "/jdk/zulu-11.0.9-darwin", + "/jdk/zulu-11.0.9-linux", + "/jdk/zulu-11.0.9-windows", + "/jdk/zulu-11.0.9.1-darwin", + "/jdk/zulu-11.0.9.1-darwin-aarch64", + "/jdk/zulu-11.0.9.1-linux", + "/jdk/zulu-11.0.9.1-windows", + "/jdk/zulu-12-darwin", + "/jdk/zulu-12-linux", + "/jdk/zulu-12-windows", + "/jdk/zulu-12.0.0-darwin", + "/jdk/zulu-12.0.0-linux", + "/jdk/zulu-12.0.0-windows", + "/jdk/zulu-12.0.1-darwin", + "/jdk/zulu-12.0.1-linux", + "/jdk/zulu-12.0.1-windows", + "/jdk/zulu-12.0.2-darwin", + "/jdk/zulu-12.0.2-linux", + "/jdk/zulu-12.0.2-windows", + "/jdk/zulu-13-darwin", + "/jdk/zulu-13-linux", + "/jdk/zulu-13-windows", + "/jdk/zulu-13.0.0-darwin", + "/jdk/zulu-13.0.0-linux", + "/jdk/zulu-13.0.0-windows", + "/jdk/zulu-13.0.1-darwin", + "/jdk/zulu-13.0.1-linux", + "/jdk/zulu-13.0.1-windows", + "/jdk/zulu-13.0.10-darwin", + "/jdk/zulu-13.0.10-darwin-aarch64", + "/jdk/zulu-13.0.10-linux", + "/jdk/zulu-13.0.10-windows", + "/jdk/zulu-13.0.11-darwin", + "/jdk/zulu-13.0.11-darwin-aarch64", + "/jdk/zulu-13.0.11-linux", + "/jdk/zulu-13.0.11-linux-aarch64", + "/jdk/zulu-13.0.11-windows", + "/jdk/zulu-13.0.12-darwin", + "/jdk/zulu-13.0.12-darwin-aarch64", + "/jdk/zulu-13.0.12-linux", + "/jdk/zulu-13.0.12-linux-aarch64", + "/jdk/zulu-13.0.12-windows", + "/jdk/zulu-13.0.13-darwin", + "/jdk/zulu-13.0.13-darwin-aarch64", + "/jdk/zulu-13.0.13-linux", + "/jdk/zulu-13.0.13-linux-aarch64", + "/jdk/zulu-13.0.13-windows", + "/jdk/zulu-13.0.14-darwin", + "/jdk/zulu-13.0.14-darwin-aarch64", + "/jdk/zulu-13.0.14-linux", + "/jdk/zulu-13.0.14-linux-aarch64", + "/jdk/zulu-13.0.14-windows", + "/jdk/zulu-13.0.2-darwin", + "/jdk/zulu-13.0.2-linux", + 
"/jdk/zulu-13.0.2-linux-aarch64", + "/jdk/zulu-13.0.2-windows", + "/jdk/zulu-13.0.3-darwin", + "/jdk/zulu-13.0.3-linux", + "/jdk/zulu-13.0.3-linux-aarch64", + "/jdk/zulu-13.0.3-windows", + "/jdk/zulu-13.0.4-darwin", + "/jdk/zulu-13.0.4-linux", + "/jdk/zulu-13.0.4-linux-aarch64", + "/jdk/zulu-13.0.4-windows", + "/jdk/zulu-13.0.5-darwin", + "/jdk/zulu-13.0.5-linux", + "/jdk/zulu-13.0.5-windows", + "/jdk/zulu-13.0.5.1-darwin", + "/jdk/zulu-13.0.5.1-darwin-aarch64", + "/jdk/zulu-13.0.5.1-linux", + "/jdk/zulu-13.0.5.1-windows", + "/jdk/zulu-13.0.6-darwin", + "/jdk/zulu-13.0.6-darwin-aarch64", + "/jdk/zulu-13.0.6-linux", + "/jdk/zulu-13.0.6-windows", + "/jdk/zulu-13.0.7-darwin", + "/jdk/zulu-13.0.7-darwin-aarch64", + "/jdk/zulu-13.0.7-linux", + "/jdk/zulu-13.0.7-windows", + "/jdk/zulu-13.0.8-darwin", + "/jdk/zulu-13.0.8-darwin-aarch64", + "/jdk/zulu-13.0.8-linux", + "/jdk/zulu-13.0.8-windows", + "/jdk/zulu-13.0.9-darwin", + "/jdk/zulu-13.0.9-darwin-aarch64", + "/jdk/zulu-13.0.9-linux", + "/jdk/zulu-13.0.9-windows", + "/jdk/zulu-14-darwin", + "/jdk/zulu-14-linux", + "/jdk/zulu-14-windows", + "/jdk/zulu-14.0.0-darwin", + "/jdk/zulu-14.0.0-linux", + "/jdk/zulu-14.0.0-windows", + "/jdk/zulu-14.0.1-darwin", + "/jdk/zulu-14.0.1-linux", + "/jdk/zulu-14.0.1-windows", + "/jdk/zulu-14.0.2-darwin", + "/jdk/zulu-14.0.2-linux", + "/jdk/zulu-14.0.2-windows", + "/jdk/zulu-15.0.0-darwin", + "/jdk/zulu-15.0.0-linux", + "/jdk/zulu-15.0.0-windows", + "/jdk/zulu-15.0.1-darwin", + "/jdk/zulu-15.0.1-darwin-aarch64", + "/jdk/zulu-15.0.1-linux", + "/jdk/zulu-15.0.1-windows", + "/jdk/zulu-15.0.10-darwin", + "/jdk/zulu-15.0.10-darwin-aarch64", + "/jdk/zulu-15.0.10-linux", + "/jdk/zulu-15.0.10-linux-aarch64", + "/jdk/zulu-15.0.10-windows", + "/jdk/zulu-15.0.2-darwin", + "/jdk/zulu-15.0.2-darwin-aarch64", + "/jdk/zulu-15.0.2-linux", + "/jdk/zulu-15.0.2-windows", + "/jdk/zulu-15.0.3-darwin", + "/jdk/zulu-15.0.3-darwin-aarch64", + "/jdk/zulu-15.0.3-linux", + "/jdk/zulu-15.0.3-windows", + "/jdk/zulu-15.0.4-darwin", + "/jdk/zulu-15.0.4-darwin-aarch64", + "/jdk/zulu-15.0.4-linux", + "/jdk/zulu-15.0.4-linux-aarch64", + "/jdk/zulu-15.0.4-windows", + "/jdk/zulu-15.0.5-darwin", + "/jdk/zulu-15.0.5-darwin-aarch64", + "/jdk/zulu-15.0.5-linux", + "/jdk/zulu-15.0.5-linux-aarch64", + "/jdk/zulu-15.0.5-windows", + "/jdk/zulu-15.0.6-darwin", + "/jdk/zulu-15.0.6-darwin-aarch64", + "/jdk/zulu-15.0.6-linux", + "/jdk/zulu-15.0.6-linux-aarch64", + "/jdk/zulu-15.0.6-windows", + "/jdk/zulu-15.0.7-darwin", + "/jdk/zulu-15.0.7-darwin-aarch64", + "/jdk/zulu-15.0.7-linux", + "/jdk/zulu-15.0.7-linux-aarch64", + "/jdk/zulu-15.0.7-windows", + "/jdk/zulu-15.0.8-darwin", + "/jdk/zulu-15.0.8-darwin-aarch64", + "/jdk/zulu-15.0.8-linux", + "/jdk/zulu-15.0.8-linux-aarch64", + "/jdk/zulu-15.0.8-windows", + "/jdk/zulu-15.0.9-darwin", + "/jdk/zulu-15.0.9-darwin-aarch64", + "/jdk/zulu-15.0.9-linux", + "/jdk/zulu-15.0.9-linux-aarch64", + "/jdk/zulu-15.0.9-windows", + "/jdk/zulu-16.0.0-darwin", + "/jdk/zulu-16.0.0-darwin-aarch64", + "/jdk/zulu-16.0.0-linux", + "/jdk/zulu-16.0.0-linux-aarch64", + "/jdk/zulu-16.0.0-windows", + "/jdk/zulu-16.0.1-darwin", + "/jdk/zulu-16.0.1-darwin-aarch64", + "/jdk/zulu-16.0.1-linux", + "/jdk/zulu-16.0.1-linux-aarch64", + "/jdk/zulu-16.0.1-windows", + "/jdk/zulu-16.0.1-windows-aarch64", + "/jdk/zulu-16.0.2-darwin", + "/jdk/zulu-16.0.2-darwin-aarch64", + "/jdk/zulu-16.0.2-linux", + "/jdk/zulu-16.0.2-linux-aarch64", + "/jdk/zulu-16.0.2-windows", + "/jdk/zulu-16.0.2-windows-aarch64", + "/jdk/zulu-17.0.0-darwin", + 
"/jdk/zulu-17.0.0-darwin-aarch64", + "/jdk/zulu-17.0.0-linux", + "/jdk/zulu-17.0.0-linux-aarch64", + "/jdk/zulu-17.0.0-windows", + "/jdk/zulu-17.0.0-windows-aarch64", + "/jdk/zulu-17.0.1-darwin", + "/jdk/zulu-17.0.1-darwin-aarch64", + "/jdk/zulu-17.0.1-linux", + "/jdk/zulu-17.0.1-linux-aarch64", + "/jdk/zulu-17.0.1-windows", + "/jdk/zulu-17.0.1-windows-aarch64", + "/jdk/zulu-17.0.10-darwin", + "/jdk/zulu-17.0.10-darwin-aarch64", + "/jdk/zulu-17.0.10-linux", + "/jdk/zulu-17.0.10-linux-aarch64", + "/jdk/zulu-17.0.10-windows", + "/jdk/zulu-17.0.10-windows-aarch64", + "/jdk/zulu-17.0.11-darwin", + "/jdk/zulu-17.0.11-darwin-aarch64", + "/jdk/zulu-17.0.11-linux", + "/jdk/zulu-17.0.11-linux-aarch64", + "/jdk/zulu-17.0.11-windows", + "/jdk/zulu-17.0.11-windows-aarch64", + "/jdk/zulu-17.0.2-darwin", + "/jdk/zulu-17.0.2-darwin-aarch64", + "/jdk/zulu-17.0.2-linux", + "/jdk/zulu-17.0.2-linux-aarch64", + "/jdk/zulu-17.0.2-windows", + "/jdk/zulu-17.0.2-windows-aarch64", + "/jdk/zulu-17.0.3-darwin", + "/jdk/zulu-17.0.3-darwin-aarch64", + "/jdk/zulu-17.0.3-linux", + "/jdk/zulu-17.0.3-linux-aarch64", + "/jdk/zulu-17.0.3-windows", + "/jdk/zulu-17.0.3-windows-aarch64", + "/jdk/zulu-17.0.4-darwin", + "/jdk/zulu-17.0.4-darwin-aarch64", + "/jdk/zulu-17.0.4-linux", + "/jdk/zulu-17.0.4-linux-aarch64", + "/jdk/zulu-17.0.4-windows", + "/jdk/zulu-17.0.4-windows-aarch64", + "/jdk/zulu-17.0.4.1-darwin", + "/jdk/zulu-17.0.4.1-darwin-aarch64", + "/jdk/zulu-17.0.4.1-linux", + "/jdk/zulu-17.0.4.1-linux-aarch64", + "/jdk/zulu-17.0.4.1-windows", + "/jdk/zulu-17.0.4.1-windows-aarch64", + "/jdk/zulu-17.0.5-darwin", + "/jdk/zulu-17.0.5-darwin-aarch64", + "/jdk/zulu-17.0.5-linux", + "/jdk/zulu-17.0.5-linux-aarch64", + "/jdk/zulu-17.0.5-windows", + "/jdk/zulu-17.0.5-windows-aarch64", + "/jdk/zulu-17.0.6-darwin", + "/jdk/zulu-17.0.6-darwin-aarch64", + "/jdk/zulu-17.0.6-linux", + "/jdk/zulu-17.0.6-linux-aarch64", + "/jdk/zulu-17.0.6-windows", + "/jdk/zulu-17.0.6-windows-aarch64", + "/jdk/zulu-17.0.7-darwin", + "/jdk/zulu-17.0.7-darwin-aarch64", + "/jdk/zulu-17.0.7-linux", + "/jdk/zulu-17.0.7-linux-aarch64", + "/jdk/zulu-17.0.7-windows", + "/jdk/zulu-17.0.7-windows-aarch64", + "/jdk/zulu-17.0.8-darwin", + "/jdk/zulu-17.0.8-darwin-aarch64", + "/jdk/zulu-17.0.8-linux", + "/jdk/zulu-17.0.8-linux-aarch64", + "/jdk/zulu-17.0.8-windows", + "/jdk/zulu-17.0.8-windows-aarch64", + "/jdk/zulu-17.0.8.1-darwin", + "/jdk/zulu-17.0.8.1-darwin-aarch64", + "/jdk/zulu-17.0.8.1-linux", + "/jdk/zulu-17.0.8.1-linux-aarch64", + "/jdk/zulu-17.0.8.1-windows", + "/jdk/zulu-17.0.8.1-windows-aarch64", + "/jdk/zulu-17.0.9-darwin", + "/jdk/zulu-17.0.9-darwin-aarch64", + "/jdk/zulu-17.0.9-linux", + "/jdk/zulu-17.0.9-linux-aarch64", + "/jdk/zulu-17.0.9-windows", + "/jdk/zulu-17.0.9-windows-aarch64", + "/jdk/zulu-18.0.0-darwin", + "/jdk/zulu-18.0.0-darwin-aarch64", + "/jdk/zulu-18.0.0-linux", + "/jdk/zulu-18.0.0-linux-aarch64", + "/jdk/zulu-18.0.0-windows", + "/jdk/zulu-18.0.0-windows-aarch64", + "/jdk/zulu-18.0.1-darwin", + "/jdk/zulu-18.0.1-darwin-aarch64", + "/jdk/zulu-18.0.1-linux", + "/jdk/zulu-18.0.1-linux-aarch64", + "/jdk/zulu-18.0.1-windows", + "/jdk/zulu-18.0.1-windows-aarch64", + "/jdk/zulu-18.0.2-darwin", + "/jdk/zulu-18.0.2-darwin-aarch64", + "/jdk/zulu-18.0.2-linux", + "/jdk/zulu-18.0.2-linux-aarch64", + "/jdk/zulu-18.0.2-windows", + "/jdk/zulu-18.0.2-windows-aarch64", + "/jdk/zulu-18.0.2.1-darwin", + "/jdk/zulu-18.0.2.1-darwin-aarch64", + "/jdk/zulu-18.0.2.1-linux", + "/jdk/zulu-18.0.2.1-linux-aarch64", + "/jdk/zulu-18.0.2.1-windows", + 
"/jdk/zulu-18.0.2.1-windows-aarch64", + "/jdk/zulu-19.0.0-darwin", + "/jdk/zulu-19.0.0-darwin-aarch64", + "/jdk/zulu-19.0.0-linux", + "/jdk/zulu-19.0.0-linux-aarch64", + "/jdk/zulu-19.0.0-windows", + "/jdk/zulu-19.0.0-windows-aarch64", + "/jdk/zulu-19.0.1-darwin", + "/jdk/zulu-19.0.1-darwin-aarch64", + "/jdk/zulu-19.0.1-linux", + "/jdk/zulu-19.0.1-linux-aarch64", + "/jdk/zulu-19.0.1-windows", + "/jdk/zulu-19.0.2-darwin", + "/jdk/zulu-19.0.2-darwin-aarch64", + "/jdk/zulu-19.0.2-linux", + "/jdk/zulu-19.0.2-linux-aarch64", + "/jdk/zulu-19.0.2-windows", + "/jdk/zulu-20.0.0-darwin", + "/jdk/zulu-20.0.0-darwin-aarch64", + "/jdk/zulu-20.0.0-linux", + "/jdk/zulu-20.0.0-linux-aarch64", + "/jdk/zulu-20.0.0-windows", + "/jdk/zulu-20.0.1-darwin", + "/jdk/zulu-20.0.1-darwin-aarch64", + "/jdk/zulu-20.0.1-linux", + "/jdk/zulu-20.0.1-linux-aarch64", + "/jdk/zulu-20.0.1-windows", + "/jdk/zulu-20.0.2-darwin", + "/jdk/zulu-20.0.2-darwin-aarch64", + "/jdk/zulu-20.0.2-linux", + "/jdk/zulu-20.0.2-linux-aarch64", + "/jdk/zulu-20.0.2-windows", + "/jdk/zulu-21.0.0-darwin", + "/jdk/zulu-21.0.0-darwin-aarch64", + "/jdk/zulu-21.0.0-linux", + "/jdk/zulu-21.0.0-linux-aarch64", + "/jdk/zulu-21.0.0-windows", + "/jdk/zulu-21.0.1-darwin", + "/jdk/zulu-21.0.1-darwin-aarch64", + "/jdk/zulu-21.0.1-linux", + "/jdk/zulu-21.0.1-linux-aarch64", + "/jdk/zulu-21.0.1-windows", + "/jdk/zulu-21.0.2-darwin", + "/jdk/zulu-21.0.2-darwin-aarch64", + "/jdk/zulu-21.0.2-linux", + "/jdk/zulu-21.0.2-linux-aarch64", + "/jdk/zulu-21.0.2-windows", + "/jdk/zulu-21.0.3-darwin", + "/jdk/zulu-21.0.3-darwin-aarch64", + "/jdk/zulu-21.0.3-linux", + "/jdk/zulu-21.0.3-linux-aarch64", + "/jdk/zulu-21.0.3-windows", + "/jdk/zulu-21.0.3-windows-aarch64", + "/jdk/zulu-22.0.0-darwin", + "/jdk/zulu-22.0.0-darwin-aarch64", + "/jdk/zulu-22.0.0-linux", + "/jdk/zulu-22.0.0-linux-aarch64", + "/jdk/zulu-22.0.0-windows", + "/jdk/zulu-22.0.1-darwin", + "/jdk/zulu-22.0.1-darwin-aarch64", + "/jdk/zulu-22.0.1-linux", + "/jdk/zulu-22.0.1-linux-aarch64", + "/jdk/zulu-22.0.1-windows", + "/jdk/zulu-6.0.103-linux", + "/jdk/zulu-6.0.103-windows", + "/jdk/zulu-6.0.107-linux", + "/jdk/zulu-6.0.107-windows", + "/jdk/zulu-6.0.113-linux", + "/jdk/zulu-6.0.113-windows", + "/jdk/zulu-6.0.119-linux", + "/jdk/zulu-6.0.119-windows", + "/jdk/zulu-6.0.47-windows", + "/jdk/zulu-6.0.49-windows", + "/jdk/zulu-6.0.53-windows", + "/jdk/zulu-6.0.56-windows", + "/jdk/zulu-6.0.59-windows", + "/jdk/zulu-6.0.63-windows", + "/jdk/zulu-6.0.69-windows", + "/jdk/zulu-6.0.73-windows", + "/jdk/zulu-6.0.77-linux", + "/jdk/zulu-6.0.77-windows", + "/jdk/zulu-6.0.79-linux", + "/jdk/zulu-6.0.79-windows", + "/jdk/zulu-6.0.83-linux", + "/jdk/zulu-6.0.83-windows", + "/jdk/zulu-6.0.87-linux", + "/jdk/zulu-6.0.87-windows", + "/jdk/zulu-6.0.89-linux", + "/jdk/zulu-6.0.89-windows", + "/jdk/zulu-6.0.93-linux", + "/jdk/zulu-6.0.93-windows", + "/jdk/zulu-6.0.97-linux", + "/jdk/zulu-6.0.97-windows", + "/jdk/zulu-6.0.99-linux", + "/jdk/zulu-6.0.99-windows", + "/jdk/zulu-7.0.101-darwin", + "/jdk/zulu-7.0.101-linux", + "/jdk/zulu-7.0.101-windows", + "/jdk/zulu-7.0.111-darwin", + "/jdk/zulu-7.0.111-linux", + "/jdk/zulu-7.0.111-windows", + "/jdk/zulu-7.0.121-darwin", + "/jdk/zulu-7.0.121-linux", + "/jdk/zulu-7.0.121-windows", + "/jdk/zulu-7.0.131-darwin", + "/jdk/zulu-7.0.131-linux", + "/jdk/zulu-7.0.131-windows", + "/jdk/zulu-7.0.141-darwin", + "/jdk/zulu-7.0.141-linux", + "/jdk/zulu-7.0.141-windows", + "/jdk/zulu-7.0.154-darwin", + "/jdk/zulu-7.0.154-linux", + "/jdk/zulu-7.0.154-windows", + "/jdk/zulu-7.0.161-darwin", + 
"/jdk/zulu-7.0.161-linux", + "/jdk/zulu-7.0.161-windows", + "/jdk/zulu-7.0.171-darwin", + "/jdk/zulu-7.0.171-linux", + "/jdk/zulu-7.0.171-windows", + "/jdk/zulu-7.0.181-darwin", + "/jdk/zulu-7.0.181-linux", + "/jdk/zulu-7.0.181-windows", + "/jdk/zulu-7.0.191-darwin", + "/jdk/zulu-7.0.191-linux", + "/jdk/zulu-7.0.191-windows", + "/jdk/zulu-7.0.201-darwin", + "/jdk/zulu-7.0.201-linux", + "/jdk/zulu-7.0.201-windows", + "/jdk/zulu-7.0.211-darwin", + "/jdk/zulu-7.0.211-linux", + "/jdk/zulu-7.0.211-windows", + "/jdk/zulu-7.0.222-darwin", + "/jdk/zulu-7.0.222-linux", + "/jdk/zulu-7.0.222-windows", + "/jdk/zulu-7.0.232-darwin", + "/jdk/zulu-7.0.232-linux", + "/jdk/zulu-7.0.232-windows", + "/jdk/zulu-7.0.242-darwin", + "/jdk/zulu-7.0.242-linux", + "/jdk/zulu-7.0.242-windows", + "/jdk/zulu-7.0.252-darwin", + "/jdk/zulu-7.0.252-linux", + "/jdk/zulu-7.0.252-windows", + "/jdk/zulu-7.0.262-darwin", + "/jdk/zulu-7.0.262-linux", + "/jdk/zulu-7.0.262-windows", + "/jdk/zulu-7.0.272-darwin", + "/jdk/zulu-7.0.272-linux", + "/jdk/zulu-7.0.272-windows", + "/jdk/zulu-7.0.282-darwin", + "/jdk/zulu-7.0.282-linux", + "/jdk/zulu-7.0.282-windows", + "/jdk/zulu-7.0.285-darwin", + "/jdk/zulu-7.0.285-linux", + "/jdk/zulu-7.0.285-windows", + "/jdk/zulu-7.0.292-darwin", + "/jdk/zulu-7.0.292-linux", + "/jdk/zulu-7.0.292-windows", + "/jdk/zulu-7.0.302-darwin", + "/jdk/zulu-7.0.302-linux", + "/jdk/zulu-7.0.302-windows", + "/jdk/zulu-7.0.312-darwin", + "/jdk/zulu-7.0.312-linux", + "/jdk/zulu-7.0.312-windows", + "/jdk/zulu-7.0.322-darwin", + "/jdk/zulu-7.0.322-linux", + "/jdk/zulu-7.0.322-windows", + "/jdk/zulu-7.0.332-darwin", + "/jdk/zulu-7.0.332-linux", + "/jdk/zulu-7.0.332-windows", + "/jdk/zulu-7.0.342-darwin", + "/jdk/zulu-7.0.342-linux", + "/jdk/zulu-7.0.342-windows", + "/jdk/zulu-7.0.352-darwin", + "/jdk/zulu-7.0.352-linux", + "/jdk/zulu-7.0.352-windows", + "/jdk/zulu-7.0.45-windows", + "/jdk/zulu-7.0.51-windows", + "/jdk/zulu-7.0.55-windows", + "/jdk/zulu-7.0.60-windows", + "/jdk/zulu-7.0.65-darwin", + "/jdk/zulu-7.0.65-windows", + "/jdk/zulu-7.0.72-darwin", + "/jdk/zulu-7.0.72-windows", + "/jdk/zulu-7.0.76-darwin", + "/jdk/zulu-7.0.76-windows", + "/jdk/zulu-7.0.79-darwin", + "/jdk/zulu-7.0.79-windows", + "/jdk/zulu-7.0.80-darwin", + "/jdk/zulu-7.0.80-windows", + "/jdk/zulu-7.0.85-darwin", + "/jdk/zulu-7.0.85-windows", + "/jdk/zulu-7.0.95-darwin", + "/jdk/zulu-7.0.95-linux", + "/jdk/zulu-7.0.95-windows", + "/jdk/zulu-8.0.0-windows", + "/jdk/zulu-8.0.101-windows", + "/jdk/zulu-8.0.102-darwin", + "/jdk/zulu-8.0.102-linux", + "/jdk/zulu-8.0.102-windows", + "/jdk/zulu-8.0.11-darwin", + "/jdk/zulu-8.0.11-windows", + "/jdk/zulu-8.0.112-darwin", + "/jdk/zulu-8.0.112-linux", + "/jdk/zulu-8.0.112-windows", + "/jdk/zulu-8.0.121-darwin", + "/jdk/zulu-8.0.121-linux", + "/jdk/zulu-8.0.121-windows", + "/jdk/zulu-8.0.131-darwin", + "/jdk/zulu-8.0.131-linux", + "/jdk/zulu-8.0.131-windows", + "/jdk/zulu-8.0.144-darwin", + "/jdk/zulu-8.0.144-linux", + "/jdk/zulu-8.0.144-windows", + "/jdk/zulu-8.0.152-darwin", + "/jdk/zulu-8.0.152-linux", + "/jdk/zulu-8.0.152-windows", + "/jdk/zulu-8.0.162-darwin", + "/jdk/zulu-8.0.162-linux", + "/jdk/zulu-8.0.162-windows", + "/jdk/zulu-8.0.163-darwin", + "/jdk/zulu-8.0.163-linux", + "/jdk/zulu-8.0.163-windows", + "/jdk/zulu-8.0.172-darwin", + "/jdk/zulu-8.0.172-linux", + "/jdk/zulu-8.0.172-windows", + "/jdk/zulu-8.0.181-darwin", + "/jdk/zulu-8.0.181-linux", + "/jdk/zulu-8.0.181-windows", + "/jdk/zulu-8.0.192-darwin", + "/jdk/zulu-8.0.192-linux", + "/jdk/zulu-8.0.192-windows", + 
"/jdk/zulu-8.0.20-darwin", + "/jdk/zulu-8.0.20-windows", + "/jdk/zulu-8.0.201-darwin", + "/jdk/zulu-8.0.201-linux", + "/jdk/zulu-8.0.201-windows", + "/jdk/zulu-8.0.202-darwin", + "/jdk/zulu-8.0.202-linux", + "/jdk/zulu-8.0.202-windows", + "/jdk/zulu-8.0.212-darwin", + "/jdk/zulu-8.0.212-linux", + "/jdk/zulu-8.0.212-windows", + "/jdk/zulu-8.0.222-darwin", + "/jdk/zulu-8.0.222-linux", + "/jdk/zulu-8.0.222-windows", + "/jdk/zulu-8.0.232-darwin", + "/jdk/zulu-8.0.232-linux", + "/jdk/zulu-8.0.232-windows", + "/jdk/zulu-8.0.242-darwin", + "/jdk/zulu-8.0.242-linux", + "/jdk/zulu-8.0.242-windows", + "/jdk/zulu-8.0.25-darwin", + "/jdk/zulu-8.0.25-windows", + "/jdk/zulu-8.0.252-darwin", + "/jdk/zulu-8.0.252-linux", + "/jdk/zulu-8.0.252-linux-aarch64", + "/jdk/zulu-8.0.252-windows", + "/jdk/zulu-8.0.262-darwin", + "/jdk/zulu-8.0.262-linux", + "/jdk/zulu-8.0.262-linux-aarch64", + "/jdk/zulu-8.0.262-windows", + "/jdk/zulu-8.0.265-darwin", + "/jdk/zulu-8.0.265-linux", + "/jdk/zulu-8.0.265-linux-aarch64", + "/jdk/zulu-8.0.265-windows", + "/jdk/zulu-8.0.272-darwin", + "/jdk/zulu-8.0.272-linux", + "/jdk/zulu-8.0.272-windows", + "/jdk/zulu-8.0.275-darwin", + "/jdk/zulu-8.0.275-linux", + "/jdk/zulu-8.0.275-windows", + "/jdk/zulu-8.0.282-darwin", + "/jdk/zulu-8.0.282-darwin-aarch64", + "/jdk/zulu-8.0.282-linux", + "/jdk/zulu-8.0.282-windows", + "/jdk/zulu-8.0.292-darwin", + "/jdk/zulu-8.0.292-darwin-aarch64", + "/jdk/zulu-8.0.292-linux", + "/jdk/zulu-8.0.292-windows", + "/jdk/zulu-8.0.302-darwin", + "/jdk/zulu-8.0.302-darwin-aarch64", + "/jdk/zulu-8.0.302-linux", + "/jdk/zulu-8.0.302-windows", + "/jdk/zulu-8.0.31-darwin", + "/jdk/zulu-8.0.31-windows", + "/jdk/zulu-8.0.312-darwin", + "/jdk/zulu-8.0.312-darwin-aarch64", + "/jdk/zulu-8.0.312-linux", + "/jdk/zulu-8.0.312-windows", + "/jdk/zulu-8.0.322-darwin", + "/jdk/zulu-8.0.322-darwin-aarch64", + "/jdk/zulu-8.0.322-linux", + "/jdk/zulu-8.0.322-windows", + "/jdk/zulu-8.0.332-darwin", + "/jdk/zulu-8.0.332-darwin-aarch64", + "/jdk/zulu-8.0.332-linux", + "/jdk/zulu-8.0.332-windows", + "/jdk/zulu-8.0.342-darwin", + "/jdk/zulu-8.0.342-darwin-aarch64", + "/jdk/zulu-8.0.342-linux", + "/jdk/zulu-8.0.342-windows", + "/jdk/zulu-8.0.345-darwin", + "/jdk/zulu-8.0.345-darwin-aarch64", + "/jdk/zulu-8.0.345-linux", + "/jdk/zulu-8.0.345-windows", + "/jdk/zulu-8.0.352-darwin", + "/jdk/zulu-8.0.352-darwin-aarch64", + "/jdk/zulu-8.0.352-linux", + "/jdk/zulu-8.0.352-windows", + "/jdk/zulu-8.0.362-darwin", + "/jdk/zulu-8.0.362-darwin-aarch64", + "/jdk/zulu-8.0.362-linux", + "/jdk/zulu-8.0.362-windows", + "/jdk/zulu-8.0.372-darwin", + "/jdk/zulu-8.0.372-darwin-aarch64", + "/jdk/zulu-8.0.372-linux", + "/jdk/zulu-8.0.372-windows", + "/jdk/zulu-8.0.382-darwin", + "/jdk/zulu-8.0.382-darwin-aarch64", + "/jdk/zulu-8.0.382-linux", + "/jdk/zulu-8.0.382-windows", + "/jdk/zulu-8.0.392-darwin", + "/jdk/zulu-8.0.392-darwin-aarch64", + "/jdk/zulu-8.0.392-linux", + "/jdk/zulu-8.0.392-linux-aarch64", + "/jdk/zulu-8.0.392-windows", + "/jdk/zulu-8.0.40-windows", + "/jdk/zulu-8.0.402-darwin", + "/jdk/zulu-8.0.402-darwin-aarch64", + "/jdk/zulu-8.0.402-linux", + "/jdk/zulu-8.0.402-windows", + "/jdk/zulu-8.0.412-darwin", + "/jdk/zulu-8.0.412-darwin-aarch64", + "/jdk/zulu-8.0.412-linux", + "/jdk/zulu-8.0.412-linux-aarch64", + "/jdk/zulu-8.0.412-windows", + "/jdk/zulu-8.0.45-darwin", + "/jdk/zulu-8.0.45-windows", + "/jdk/zulu-8.0.5-windows", + "/jdk/zulu-8.0.51-darwin", + "/jdk/zulu-8.0.51-windows", + "/jdk/zulu-8.0.60-darwin", + "/jdk/zulu-8.0.60-windows", + "/jdk/zulu-8.0.65-darwin", + 
"/jdk/zulu-8.0.65-windows", + "/jdk/zulu-8.0.66-darwin", + "/jdk/zulu-8.0.66-windows", + "/jdk/zulu-8.0.71-darwin", + "/jdk/zulu-8.0.71-linux", + "/jdk/zulu-8.0.71-windows", + "/jdk/zulu-8.0.72-darwin", + "/jdk/zulu-8.0.72-linux", + "/jdk/zulu-8.0.72-windows", + "/jdk/zulu-8.0.91-darwin", + "/jdk/zulu-8.0.91-linux", + "/jdk/zulu-8.0.91-windows", + "/jdk/zulu-8.0.92-darwin", + "/jdk/zulu-8.0.92-linux", + "/jdk/zulu-8.0.92-windows", + "/jdk/zulu-9.0.0-darwin", + "/jdk/zulu-9.0.0-linux", + "/jdk/zulu-9.0.0-windows", + "/jdk/zulu-9.0.1-darwin", + "/jdk/zulu-9.0.1-linux", + "/jdk/zulu-9.0.1-windows", + "/jdk/zulu-9.0.4-darwin", + "/jdk/zulu-9.0.4-linux", + "/jdk/zulu-9.0.4-windows", + "/jdk/zulu-9.0.7-darwin", + "/jdk/zulu-9.0.7-linux", + "/jdk/zulu-9.0.7-windows", + "/jdk/latest_adoptiumjdk_11_darwin", + "/jdk/latest_adoptiumjdk_11_darwin_aarch64", + "/jdk/latest_adoptiumjdk_11_linux", + "/jdk/latest_adoptiumjdk_11_linux_aarch64", + "/jdk/latest_adoptiumjdk_11_windows", + "/jdk/latest_adoptiumjdk_11_windows_x86_32", + "/jdk/latest_adoptiumjdk_16_darwin", + "/jdk/latest_adoptiumjdk_16_linux", + "/jdk/latest_adoptiumjdk_16_linux_aarch64", + "/jdk/latest_adoptiumjdk_16_windows", + "/jdk/latest_adoptiumjdk_16_windows_x86_32", + "/jdk/latest_adoptiumjdk_17_darwin", + "/jdk/latest_adoptiumjdk_17_darwin_aarch64", + "/jdk/latest_adoptiumjdk_17_linux", + "/jdk/latest_adoptiumjdk_17_linux_aarch64", + "/jdk/latest_adoptiumjdk_17_windows", + "/jdk/latest_adoptiumjdk_17_windows_x86_32", + "/jdk/latest_adoptiumjdk_21_darwin", + "/jdk/latest_adoptiumjdk_21_darwin_aarch64", + "/jdk/latest_adoptiumjdk_21_linux", + "/jdk/latest_adoptiumjdk_21_linux_aarch64", + "/jdk/latest_adoptiumjdk_21_windows", + "/jdk/latest_adoptiumjdk_8_darwin", + "/jdk/latest_adoptiumjdk_8_linux", + "/jdk/latest_adoptiumjdk_8_linux_aarch64", + "/jdk/latest_adoptiumjdk_8_windows", + "/jdk/latest_adoptiumjdk_8_windows_x86_32", + "/jdk/latest_adoptopenjdk-openj9_11_linux", + "/jdk/latest_adoptopenjdk-openj9_11_windows", + "/jdk/latest_adoptopenjdk-openj9_8_linux", + "/jdk/latest_adoptopenjdk-openj9_8_windows", + "/jdk/latest_adoptopenjdk_11_darwin", + "/jdk/latest_adoptopenjdk_11_linux", + "/jdk/latest_adoptopenjdk_11_linux_aarch64", + "/jdk/latest_adoptopenjdk_11_windows", + "/jdk/latest_adoptopenjdk_11_windows_x86_32", + "/jdk/latest_adoptopenjdk_12_darwin", + "/jdk/latest_adoptopenjdk_12_linux", + "/jdk/latest_adoptopenjdk_12_linux_aarch64", + "/jdk/latest_adoptopenjdk_12_windows", + "/jdk/latest_adoptopenjdk_12_windows_x86_32", + "/jdk/latest_adoptopenjdk_13_darwin", + "/jdk/latest_adoptopenjdk_13_linux", + "/jdk/latest_adoptopenjdk_13_linux_aarch64", + "/jdk/latest_adoptopenjdk_13_windows", + "/jdk/latest_adoptopenjdk_13_windows_x86_32", + "/jdk/latest_adoptopenjdk_14_darwin", + "/jdk/latest_adoptopenjdk_14_linux", + "/jdk/latest_adoptopenjdk_14_linux_aarch64", + "/jdk/latest_adoptopenjdk_14_windows", + "/jdk/latest_adoptopenjdk_14_windows_x86_32", + "/jdk/latest_adoptopenjdk_15_darwin", + "/jdk/latest_adoptopenjdk_15_linux", + "/jdk/latest_adoptopenjdk_15_linux_aarch64", + "/jdk/latest_adoptopenjdk_15_windows", + "/jdk/latest_adoptopenjdk_15_windows_x86_32", + "/jdk/latest_adoptopenjdk_16_darwin", + "/jdk/latest_adoptopenjdk_16_linux", + "/jdk/latest_adoptopenjdk_16_linux_aarch64", + "/jdk/latest_adoptopenjdk_16_windows", + "/jdk/latest_adoptopenjdk_16_windows_x86_32", + "/jdk/latest_adoptopenjdk_8_darwin", + "/jdk/latest_adoptopenjdk_8_linux", + "/jdk/latest_adoptopenjdk_8_linux_aarch64", + "/jdk/latest_adoptopenjdk_8_windows", + 
"/jdk/latest_adoptopenjdk_8_windows_x86_32", + "/jdk/latest_amazon-corretto_11_darwin", + "/jdk/latest_amazon-corretto_11_darwin_aarch64", + "/jdk/latest_amazon-corretto_11_linux", + "/jdk/latest_amazon-corretto_11_linux_aarch64", + "/jdk/latest_amazon-corretto_11_windows", + "/jdk/latest_amazon-corretto_17_darwin", + "/jdk/latest_amazon-corretto_17_darwin_aarch64", + "/jdk/latest_amazon-corretto_17_linux", + "/jdk/latest_amazon-corretto_17_linux_aarch64", + "/jdk/latest_amazon-corretto_17_windows", + "/jdk/latest_amazon-corretto_21_darwin", + "/jdk/latest_amazon-corretto_21_darwin_aarch64", + "/jdk/latest_amazon-corretto_21_linux", + "/jdk/latest_amazon-corretto_21_linux_aarch64", + "/jdk/latest_amazon-corretto_21_windows", + "/jdk/latest_amazon-corretto_8_darwin", + "/jdk/latest_amazon-corretto_8_darwin_aarch64", + "/jdk/latest_amazon-corretto_8_linux", + "/jdk/latest_amazon-corretto_8_linux_aarch64", + "/jdk/latest_amazon-corretto_8_windows", + "/jdk/latest_graalvm-ce_11_darwin", + "/jdk/latest_graalvm-ce_11_darwin_aarch64", + "/jdk/latest_graalvm-ce_11_linux", + "/jdk/latest_graalvm-ce_11_linux_aarch64", + "/jdk/latest_graalvm-ce_11_windows", + "/jdk/latest_graalvm-ce_17_darwin", + "/jdk/latest_graalvm-ce_17_darwin_aarch64", + "/jdk/latest_graalvm-ce_17_linux", + "/jdk/latest_graalvm-ce_17_linux_aarch64", + "/jdk/latest_graalvm-ce_17_windows", + "/jdk/latest_ibm_8_linux", + "/jdk/latest_openjdk_10_darwin", + "/jdk/latest_openjdk_10_linux", + "/jdk/latest_openjdk_10_windows", + "/jdk/latest_openjdk_11_darwin", + "/jdk/latest_openjdk_11_linux", + "/jdk/latest_openjdk_11_windows", + "/jdk/latest_openjdk_12_darwin", + "/jdk/latest_openjdk_12_linux", + "/jdk/latest_openjdk_12_windows", + "/jdk/latest_openjdk_13_darwin", + "/jdk/latest_openjdk_13_linux", + "/jdk/latest_openjdk_13_windows", + "/jdk/latest_openjdk_14_darwin", + "/jdk/latest_openjdk_14_linux", + "/jdk/latest_openjdk_14_windows", + "/jdk/latest_openjdk_15_darwin", + "/jdk/latest_openjdk_15_linux", + "/jdk/latest_openjdk_15_linux_aarch64", + "/jdk/latest_openjdk_15_windows", + "/jdk/latest_openjdk_16_darwin", + "/jdk/latest_openjdk_16_linux", + "/jdk/latest_openjdk_16_linux_aarch64", + "/jdk/latest_openjdk_16_windows", + "/jdk/latest_openjdk_17_darwin", + "/jdk/latest_openjdk_17_darwin_aarch64", + "/jdk/latest_openjdk_17_linux", + "/jdk/latest_openjdk_17_linux_aarch64", + "/jdk/latest_openjdk_17_windows", + "/jdk/latest_openjdk_18_darwin", + "/jdk/latest_openjdk_18_darwin_aarch64", + "/jdk/latest_openjdk_18_linux", + "/jdk/latest_openjdk_18_linux_aarch64", + "/jdk/latest_openjdk_18_windows", + "/jdk/latest_openjdk_19_darwin", + "/jdk/latest_openjdk_19_darwin_aarch64", + "/jdk/latest_openjdk_19_linux", + "/jdk/latest_openjdk_19_linux_aarch64", + "/jdk/latest_openjdk_19_windows", + "/jdk/latest_openjdk_20_darwin", + "/jdk/latest_openjdk_20_darwin_aarch64", + "/jdk/latest_openjdk_20_linux", + "/jdk/latest_openjdk_20_linux_aarch64", + "/jdk/latest_openjdk_20_windows", + "/jdk/latest_openjdk_21_darwin", + "/jdk/latest_openjdk_21_darwin_aarch64", + "/jdk/latest_openjdk_21_linux", + "/jdk/latest_openjdk_21_linux_aarch64", + "/jdk/latest_openjdk_21_windows", + "/jdk/latest_openjdk_22_darwin", + "/jdk/latest_openjdk_22_darwin_aarch64", + "/jdk/latest_openjdk_22_linux", + "/jdk/latest_openjdk_22_linux_aarch64", + "/jdk/latest_openjdk_22_windows", + "/jdk/latest_openjdk_23_darwin", + "/jdk/latest_openjdk_23_darwin_aarch64", + "/jdk/latest_openjdk_23_linux", + "/jdk/latest_openjdk_23_linux_aarch64", + "/jdk/latest_openjdk_23_windows", + 
"/jdk/latest_openjdk_9_darwin", + "/jdk/latest_openjdk_9_linux", + "/jdk/latest_openjdk_9_windows", + "/jdk/latest_oracle_10_darwin", + "/jdk/latest_oracle_10_linux", + "/jdk/latest_oracle_10_windows", + "/jdk/latest_oracle_11_darwin", + "/jdk/latest_oracle_11_linux", + "/jdk/latest_oracle_11_linux_aarch64", + "/jdk/latest_oracle_11_windows", + "/jdk/latest_oracle_12_darwin", + "/jdk/latest_oracle_12_linux", + "/jdk/latest_oracle_12_windows", + "/jdk/latest_oracle_13_darwin", + "/jdk/latest_oracle_13_linux", + "/jdk/latest_oracle_13_windows", + "/jdk/latest_oracle_16_darwin", + "/jdk/latest_oracle_16_linux", + "/jdk/latest_oracle_16_linux_aarch64", + "/jdk/latest_oracle_16_windows", + "/jdk/latest_oracle_7_darwin", + "/jdk/latest_oracle_7_linux", + "/jdk/latest_oracle_7_windows", + "/jdk/latest_oracle_8_darwin", + "/jdk/latest_oracle_8_linux", + "/jdk/latest_oracle_8_linux_aarch64", + "/jdk/latest_oracle_8_linux_x86_32", + "/jdk/latest_oracle_8_windows", + "/jdk/latest_oracle_8_windows_x86_32", + "/jdk/latest_oracle_9_darwin", + "/jdk/latest_oracle_9_linux", + "/jdk/latest_oracle_9_windows", + "/jdk/latest_sap_8_linux", + "/jdk/latest_zulu_10_darwin", + "/jdk/latest_zulu_10_linux", + "/jdk/latest_zulu_10_windows", + "/jdk/latest_zulu_11_darwin", + "/jdk/latest_zulu_11_darwin_aarch64", + "/jdk/latest_zulu_11_linux", + "/jdk/latest_zulu_11_linux_aarch64", + "/jdk/latest_zulu_11_windows", + "/jdk/latest_zulu_12_darwin", + "/jdk/latest_zulu_12_linux", + "/jdk/latest_zulu_12_windows", + "/jdk/latest_zulu_13_darwin", + "/jdk/latest_zulu_13_darwin_aarch64", + "/jdk/latest_zulu_13_linux", + "/jdk/latest_zulu_13_linux_aarch64", + "/jdk/latest_zulu_13_windows", + "/jdk/latest_zulu_14_darwin", + "/jdk/latest_zulu_14_linux", + "/jdk/latest_zulu_14_windows", + "/jdk/latest_zulu_15_darwin", + "/jdk/latest_zulu_15_darwin_aarch64", + "/jdk/latest_zulu_15_linux", + "/jdk/latest_zulu_15_linux_aarch64", + "/jdk/latest_zulu_15_windows", + "/jdk/latest_zulu_16_darwin", + "/jdk/latest_zulu_16_darwin_aarch64", + "/jdk/latest_zulu_16_linux", + "/jdk/latest_zulu_16_linux_aarch64", + "/jdk/latest_zulu_16_windows", + "/jdk/latest_zulu_16_windows_aarch64", + "/jdk/latest_zulu_17_darwin", + "/jdk/latest_zulu_17_darwin_aarch64", + "/jdk/latest_zulu_17_linux", + "/jdk/latest_zulu_17_linux_aarch64", + "/jdk/latest_zulu_17_windows", + "/jdk/latest_zulu_17_windows_aarch64", + "/jdk/latest_zulu_18_darwin", + "/jdk/latest_zulu_18_darwin_aarch64", + "/jdk/latest_zulu_18_linux", + "/jdk/latest_zulu_18_linux_aarch64", + "/jdk/latest_zulu_18_windows", + "/jdk/latest_zulu_18_windows_aarch64", + "/jdk/latest_zulu_19_darwin", + "/jdk/latest_zulu_19_darwin_aarch64", + "/jdk/latest_zulu_19_linux", + "/jdk/latest_zulu_19_linux_aarch64", + "/jdk/latest_zulu_19_windows", + "/jdk/latest_zulu_19_windows_aarch64", + "/jdk/latest_zulu_1_linux_aarch64", + "/jdk/latest_zulu_20_darwin", + "/jdk/latest_zulu_20_darwin_aarch64", + "/jdk/latest_zulu_20_linux", + "/jdk/latest_zulu_20_linux_aarch64", + "/jdk/latest_zulu_20_windows", + "/jdk/latest_zulu_21_darwin", + "/jdk/latest_zulu_21_darwin_aarch64", + "/jdk/latest_zulu_21_linux", + "/jdk/latest_zulu_21_linux_aarch64", + "/jdk/latest_zulu_21_windows", + "/jdk/latest_zulu_21_windows_aarch64", + "/jdk/latest_zulu_22_darwin", + "/jdk/latest_zulu_22_darwin_aarch64", + "/jdk/latest_zulu_22_linux", + "/jdk/latest_zulu_22_linux_aarch64", + "/jdk/latest_zulu_22_windows", + "/jdk/latest_zulu_6_linux", + "/jdk/latest_zulu_6_windows", + "/jdk/latest_zulu_7_darwin", + "/jdk/latest_zulu_7_linux", + 
"/jdk/latest_zulu_7_windows", + "/jdk/latest_zulu_8_darwin", + "/jdk/latest_zulu_8_darwin_aarch64", + "/jdk/latest_zulu_8_linux", + "/jdk/latest_zulu_8_linux_aarch64", + "/jdk/latest_zulu_8_windows", + "/jdk/latest_zulu_9_darwin", + "/jdk/latest_zulu_9_linux", + "/jdk/latest_zulu_9_windows" + ] +} diff --git a/list-plain-deps.groovy b/list-plain-deps.groovy new file mode 100644 index 0000000000000..e7a863d9a8cee --- /dev/null +++ b/list-plain-deps.groovy @@ -0,0 +1,68 @@ +import java.nio.file.* +import java.nio.charset.StandardCharsets +import java.util.regex.Pattern + +def parseGradleFiles(Path directory) { + List configurations = ['api', + 'implementation', + "testImplementation", + "testRuntimeOnly", + "runtimeOnly"] + + def configsRexex = configurations.join('|') + def pattern = Pattern.compile(/(\w+)\s+['"](\w[^'"]+):([^'"]+):([^'"]+)['"]/) + def dependencies = [] + + Files.walk(directory).each { path -> + if (Files.isRegularFile(path) && path.toString().endsWith('.gradle')) { + def lines = Files.readAllLines(path, StandardCharsets.UTF_8) + lines.each { line -> + def matcher = pattern.matcher(line) + if (matcher.find()) { + def configuration = matcher.group(1) + def group = matcher.group(2) + def name = matcher.group(3) + def version = matcher.group(4) + dependencies << [file: path.toString(), configuration: configuration, group: group, name: name, version: version] + } + } + } + } + return dependencies +} + +String convertToVersionCatalogEntry(def dependencies) { + Set versions = new TreeSet<>() + Set entries = new TreeSet<>() + +} + +def main() { + // def directoryPath = System.console().readLine('Enter the directory path to search for *.gradle files: ').trim() + // def directory = Paths.get("directoryPath") + def directory = Paths.get("/Users/rene/dev/elastic/elasticsearch") + + if (!Files.exists(directory) || !Files.isDirectory(directory)) { + println "The directory '${directoryPath}' does not exist or is not a directory." + return + } + + def dependencies = parseGradleFiles(directory) + if (dependencies) { + def depsByFile = dependencies.groupBy {it.file} + depsByFile.each { file, deps -> + println "File: ${file}" + deps.each { dep -> + println "${dep.configuration} '${dep.group}:${dep.name}:${dep.version}'" + } + println "" + } + + println "Found ${dependencies.size()} dependencies in ${depsByFile.size()} files." + + } else { + println "No dependencies found." 
+    }
+}
+
+main()
\ No newline at end of file
diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties
index 515ab9d5f1822..efe2ff3449216 100644
--- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties
+++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionSha256Sum=f8b4f4772d302c8ff580bc40d0f56e715de69b163546944f787c87abf209c961
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.8-all.zip
+distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip
 networkTimeout=10000
 validateDistributionUrl=true
 zipStoreBase=GRADLE_USER_HOME
diff --git a/versions.log b/versions.log
new file mode 100644
index 0000000000000..10a88d7d1172c
--- /dev/null
+++ b/versions.log
@@ -0,0 +1,595 @@
+Loaded version property: protobuf = 3.21.9
+Loaded version property: junit5 = 5.7.1
+Loaded version property: commons_lang3 = 3.9
+Loaded version property: jmh = 1.26
+Loaded version property: reflections = 0.10.2
+Loaded version property: lucene = 9.11.1
+Loaded version property: dockerJava = 3.3.4
+Loaded version property: opensaml = 4.3.0
+Loaded version property: commonslogging = 1.2
+Loaded version property: bouncycastle = 1.78.1
+Loaded version property: jackson = 2.15.0
+Loaded version property: elasticsearch = 8.15.0
+Loaded version property: testcontainer = 1.19.2
+Loaded version property: commonscodec = 1.15
+Loaded version property: jna = 5.12.1
+Loaded version property: jimfs = 1.3.0
+Loaded version property: netty = 4.1.109.Final
+Loaded version property: log4j = 2.19.0
+Loaded version property: spatial4j = 0.7
+Loaded version property: junit = 4.13.2
+Loaded version property: commonsCompress = 1.24.0
+Loaded version property: slf4j = 2.0.6
+Loaded version property: bundled_jdk_vendor = openjdk
+Loaded version property: icu4j = 68.2
+Loaded version property: jts = 1.15.0
+Loaded version property: supercsv = 2.4.0
+Loaded version property: randomizedrunner = 2.8.0
+Loaded version property: httpasyncclient = 4.1.5
+Loaded version property: google_oauth_client = 1.34.1
+Loaded version property: ductTape = 1.0.8
+Loaded version property: antlr4 = 4.13.1
+Loaded version property: jimfs_guava = 32.1.1-jre
+Loaded version property: mocksocket = 1.2
+Loaded version property: bundled_jdk = 22.0.1+8@c7ec1332f7bb44aeba2eb341ae18aca4
+Loaded version property: networknt_json_schema_validator = 1.0.48
+Loaded version property: hamcrest = 2.1
+Loaded version property: ecsLogging = 1.2.0
+Loaded version property: snakeyaml = 2.0
+Loaded version property: httpclient = 4.5.14
+Loaded version property: httpcore = 4.4.13
+Version Properties: false
+File: /Users/rene/dev/elastic/elasticsearch/test/framework/build.gradle
+Resolving version: ${versions.randomizedrunner}
+"com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" -> "[group:com.carrotsearch.randomizedtesting, name:randomizedtesting-runner, version:randomizedtesting-runner]"
+Resolving version: ${versions.junit}
+"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]"
+Resolving version: ${versions.hamcrest}
+"org.hamcrest:hamcrest:${versions.hamcrest}" -> "[group:org.hamcrest, name:hamcrest, version:hamcrest]"
+Resolving version: ${versions.lucene}
+"org.apache.lucene:lucene-test-framework:${versions.lucene}"
-> "[group:org.apache.lucene, name:lucene-test-framework, version:lucene-test-framework]" +Resolving version: ${versions.lucene} +"org.apache.lucene:lucene-codecs:${versions.lucene}" -> "[group:org.apache.lucene, name:lucene-codecs, version:lucene-codecs]" +Resolving version: ${versions.commonslogging} +"commons-logging:commons-logging:${versions.commonslogging}" -> "[group:commons-logging, name:commons-logging, version:commons-logging]" +Resolving version: ${versions.commonscodec} +"commons-codec:commons-codec:${versions.commonscodec}" -> "[group:commons-codec, name:commons-codec, version:commons-codec]" +Resolving version: 5.11.0 +"org.mockito:mockito-core:5.11.0" -> "[group:org.mockito, name:mockito-core, version:mockito-core]" +Resolving version: 5.11.0 +"org.mockito:mockito-subclass:5.11.0" -> "[group:org.mockito, name:mockito-subclass, version:mockito-subclass]" +Resolving version: 1.14.12 +"net.bytebuddy:byte-buddy:1.14.12" -> "[group:net.bytebuddy, name:byte-buddy, version:byte-buddy]" +Resolving version: 3.3 +"org.objenesis:objenesis:3.3" -> "[group:org.objenesis, name:objenesis, version:objenesis]" +Resolving version: ${versions.mocksocket} +"org.elasticsearch:mocksocket:${versions.mocksocket}" -> "[group:org.elasticsearch, name:mocksocket, version:mocksocket]" + +File: /Users/rene/dev/elastic/elasticsearch/test/test-clusters/build.gradle +Resolving version: ${versions.junit} +existingMajor: 4, newMajor: 4 +"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" +Resolving version: ${versions.log4j} +"org.apache.logging.log4j:log4j-api:${versions.log4j}" -> "[group:org.apache.logging.log4j, name:log4j-api, version:log4j-api]" +Resolving version: ${versions.jackson} +"com.fasterxml.jackson.core:jackson-core:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-core, version:jackson-core]" +Resolving version: ${versions.jackson} +"com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-annotations, version:jackson-annotations]" +Resolving version: ${versions.jackson} +"com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-databind, version:jackson-databind]" + +File: /Users/rene/dev/elastic/elasticsearch/test/immutable-collections-patch/build.gradle +Resolving version: 9.7 +"org.ow2.asm:asm:9.7" -> "[group:org.ow2.asm, name:asm, version:asm]" +Resolving version: 9.7 +"org.ow2.asm:asm-tree:9.7" -> "[group:org.ow2.asm, name:asm-tree, version:asm-tree]" + +File: /Users/rene/dev/elastic/elasticsearch/test/logger-usage/build.gradle +Resolving version: 9.7 +existingMajor: 9, newMajor: 9 +"org.ow2.asm:asm:9.7" -> "[group:org.ow2.asm, name:asm, version:asm]" +Resolving version: 9.7 +existingMajor: 9, newMajor: 9 +"org.ow2.asm:asm-tree:9.7" -> "[group:org.ow2.asm, name:asm-tree, version:asm-tree]" +Resolving version: 9.7 +"org.ow2.asm:asm-analysis:9.7" -> "[group:org.ow2.asm, name:asm-analysis, version:asm-analysis]" +Resolving version: ${versions.log4j} +existingMajor: 2, newMajor: 2 +"org.apache.logging.log4j:log4j-api:${versions.log4j}" -> "[group:org.apache.logging.log4j, name:log4j-api, version:log4j-api]" + +File: /Users/rene/dev/elastic/elasticsearch/test/fixtures/testcontainer-utils/build.gradle +Resolving version: ${versions.junit} +existingMajor: 4, newMajor: 4 +"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" +Resolving version: ${versions.testcontainer} 
+"org.testcontainers:testcontainers:${versions.testcontainer}" -> "[group:org.testcontainers, name:testcontainers, version:testcontainers]" +Resolving version: ${versions.randomizedrunner} +existingMajor: 2, newMajor: 2 +"com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" -> "[group:com.carrotsearch.randomizedtesting, name:randomizedtesting-runner, version:randomizedtesting-runner]" +Resolving version: ${versions.dockerJava} +"com.github.docker-java:docker-java-api:${versions.dockerJava}" -> "[group:com.github.docker-java, name:docker-java-api, version:docker-java-api]" +Resolving version: ${versions.slf4j} +"org.slf4j:slf4j-api:${versions.slf4j}" -> "[group:org.slf4j, name:slf4j-api, version:slf4j-api]" +Resolving version: ${versions.dockerJava} +"com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" -> "[group:com.github.docker-java, name:docker-java-transport-zerodep, version:docker-java-transport-zerodep]" +Resolving version: ${versions.dockerJava} +"com.github.docker-java:docker-java-transport:${versions.dockerJava}" -> "[group:com.github.docker-java, name:docker-java-transport, version:docker-java-transport]" +Resolving version: ${versions.dockerJava} +"com.github.docker-java:docker-java-core:${versions.dockerJava}" -> "[group:com.github.docker-java, name:docker-java-core, version:docker-java-core]" +Resolving version: ${versions.commonsCompress} +"org.apache.commons:commons-compress:${versions.commonsCompress}" -> "[group:org.apache.commons, name:commons-compress, version:commons-compress]" +Resolving version: ${versions.ductTape} +"org.rnorth.duct-tape:duct-tape:${versions.ductTape}" -> "[group:org.rnorth.duct-tape, name:duct-tape, version:duct-tape]" +Resolving version: ${versions.jackson} +existingMajor: 2, newMajor: 2 +"com.fasterxml.jackson.core:jackson-core:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-core, version:jackson-core]" +Resolving version: ${versions.jackson} +existingMajor: 2, newMajor: 2 +"com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-annotations, version:jackson-annotations]" + +File: /Users/rene/dev/elastic/elasticsearch/test/fixtures/old-elasticsearch/build.gradle +Resolving version: ${versions.lucene} +"org.apache.lucene:lucene-core:${versions.lucene}" -> "[group:org.apache.lucene, name:lucene-core, version:lucene-core]" + +File: /Users/rene/dev/elastic/elasticsearch/test/fixtures/krb5kdc-fixture/build.gradle +Resolving version: ${versions.junit} +existingMajor: 4, newMajor: 4 +"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" +Resolving version: ${versions.slf4j} +existingMajor: 2, newMajor: 2 +"org.slf4j:slf4j-api:${versions.slf4j}" -> "[group:org.slf4j, name:slf4j-api, version:slf4j-api]" +Resolving version: ${versions.dockerJava} +existingMajor: 3, newMajor: 3 +"com.github.docker-java:docker-java-api:${versions.dockerJava}" -> "[group:com.github.docker-java, name:docker-java-api, version:docker-java-api]" +Resolving version: ${versions.jackson} +existingMajor: 2, newMajor: 2 +"com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-annotations, version:jackson-annotations]" +Resolving version: ${versions.slf4j} +"org.slf4j:slf4j-simple:${versions.slf4j}" -> "[group:org.slf4j, name:slf4j-simple, version:slf4j-simple]" +Resolving version: ${versions.hamcrest} +existingMajor: 2, newMajor: 2 
+"org.hamcrest:hamcrest:${versions.hamcrest}" -> "[group:org.hamcrest, name:hamcrest, version:hamcrest]" + +File: /Users/rene/dev/elastic/elasticsearch/test/fixtures/minio-fixture/build.gradle +Resolving version: ${versions.junit} +existingMajor: 4, newMajor: 4 +"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" +Resolving version: ${versions.slf4j} +existingMajor: 2, newMajor: 2 +"org.slf4j:slf4j-simple:${versions.slf4j}" -> "[group:org.slf4j, name:slf4j-simple, version:slf4j-simple]" + +File: /Users/rene/dev/elastic/elasticsearch/test/fixtures/hdfs-fixture/build.gradle +Resolving version: ${versions.junit} +existingMajor: 4, newMajor: 4 +"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" +Resolving version: 2.8.5 +"org.apache.hadoop:hadoop-minicluster:2.8.5" -> "[group:org.apache.hadoop, name:hadoop-minicluster, version:hadoop-minicluster]" +Resolving version: 3.3.1 +existingMajor: 2, newMajor: 3 +"org.apache.hadoop:hadoop-minicluster:3.3.1" -> "[group:org.apache.hadoop, name:hadoop-minicluster, version:hadoop-minicluster3]" + +File: /Users/rene/dev/elastic/elasticsearch/test/x-content/build.gradle +Resolving version: ${versions.jackson} +existingMajor: 2, newMajor: 2 +"com.fasterxml.jackson.core:jackson-core:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-core, version:jackson-core]" +Resolving version: ${versions.networknt_json_schema_validator} +"com.networknt:json-schema-validator:${versions.networknt_json_schema_validator}" -> "[group:com.networknt, name:json-schema-validator, version:json-schema-validator]" +Resolving version: ${versions.jackson} +existingMajor: 2, newMajor: 2 +"com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-databind, version:jackson-databind]" +Resolving version: ${versions.jackson} +existingMajor: 2, newMajor: 2 +"com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-annotations, version:jackson-annotations]" +Resolving version: 1.26.1 +existingMajor: 1, newMajor: 1 +"org.apache.commons:commons-compress:1.26.1" -> "[group:org.apache.commons, name:commons-compress, version:commons-compress]" +Resolving version: 2.15.1 +"commons-io:commons-io:2.15.1" -> "[group:commons-io, name:commons-io, version:commons-io]" +Resolving version: ${versions.commons_lang3} +"org.apache.commons:commons-lang3:${versions.commons_lang3}" -> "[group:org.apache.commons, name:commons-lang3, version:commons-lang3]" + +libraries Catalog versions +randomizedtesting-runner = [group:com.carrotsearch.randomizedtesting, name:randomizedtesting-runner, version:randomizedtesting-runner] +junit = [group:junit, name:junit, version:junit] +hamcrest = [group:org.hamcrest, name:hamcrest, version:hamcrest] +lucene-test-framework = [group:org.apache.lucene, name:lucene-test-framework, version:lucene-test-framework] +lucene-codecs = [group:org.apache.lucene, name:lucene-codecs, version:lucene-codecs] +commons-logging = [group:commons-logging, name:commons-logging, version:commons-logging] +commons-codec = [group:commons-codec, name:commons-codec, version:commons-codec] +mockito-core = [group:org.mockito, name:mockito-core, version:mockito-core] +mockito-subclass = [group:org.mockito, name:mockito-subclass, version:mockito-subclass] +byte-buddy = [group:net.bytebuddy, name:byte-buddy, version:byte-buddy] +objenesis = [group:org.objenesis, name:objenesis, version:objenesis] +mocksocket = 
[group:org.elasticsearch, name:mocksocket, version:mocksocket] +log4j-api = [group:org.apache.logging.log4j, name:log4j-api, version:log4j-api] +jackson-core = [group:com.fasterxml.jackson.core, name:jackson-core, version:jackson-core] +jackson-annotations = [group:com.fasterxml.jackson.core, name:jackson-annotations, version:jackson-annotations] +jackson-databind = [group:com.fasterxml.jackson.core, name:jackson-databind, version:jackson-databind] +asm = [group:org.ow2.asm, name:asm, version:asm] +asm-tree = [group:org.ow2.asm, name:asm-tree, version:asm-tree] +asm-analysis = [group:org.ow2.asm, name:asm-analysis, version:asm-analysis] +testcontainers = [group:org.testcontainers, name:testcontainers, version:testcontainers] +docker-java-api = [group:com.github.docker-java, name:docker-java-api, version:docker-java-api] +slf4j-api = [group:org.slf4j, name:slf4j-api, version:slf4j-api] +docker-java-transport-zerodep = [group:com.github.docker-java, name:docker-java-transport-zerodep, version:docker-java-transport-zerodep] +docker-java-transport = [group:com.github.docker-java, name:docker-java-transport, version:docker-java-transport] +docker-java-core = [group:com.github.docker-java, name:docker-java-core, version:docker-java-core] +commons-compress = [group:org.apache.commons, name:commons-compress, version:commons-compress] +duct-tape = [group:org.rnorth.duct-tape, name:duct-tape, version:duct-tape] +lucene-core = [group:org.apache.lucene, name:lucene-core, version:lucene-core] +slf4j-simple = [group:org.slf4j, name:slf4j-simple, version:slf4j-simple] +hadoop-minicluster = [group:org.apache.hadoop, name:hadoop-minicluster, version:hadoop-minicluster] +hadoop-minicluster3 = [group:org.apache.hadoop, name:hadoop-minicluster, version:hadoop-minicluster3] +json-schema-validator = [group:com.networknt, name:json-schema-validator, version:json-schema-validator] +commons-io = [group:commons-io, name:commons-io, version:commons-io] +commons-lang3 = [group:org.apache.commons, name:commons-lang3, version:commons-lang3] +Version Catalog libraries +randomizedtesting-runner = 2.8.0 +junit = 4.13.2 +hamcrest = 2.1 +lucene-test-framework = 9.11.1 +lucene-codecs = 9.11.1 +commons-logging = 1.2 +commons-codec = 1.15 +mockito-core = 5.11.0 +mockito-subclass = 5.11.0 +byte-buddy = 1.14.12 +objenesis = 3.3 +mocksocket = 1.2 +log4j-api = 2.19.0 +jackson-core = 2.15.0 +jackson-annotations = 2.15.0 +jackson-databind = 2.15.0 +asm = 9.7 +asm-tree = 9.7 +asm-analysis = 9.7 +testcontainers = 1.19.2 +docker-java-api = 3.3.4 +slf4j-api = 2.0.6 +docker-java-transport-zerodep = 3.3.4 +docker-java-transport = 3.3.4 +docker-java-core = 3.3.4 +commons-compress = 1.26.1 +duct-tape = 1.0.8 +lucene-core = 9.11.1 +slf4j-simple = 2.0.6 +hadoop-minicluster = 2.8.5 +hadoop-minicluster3 = 3.3.1 +json-schema-validator = 1.0.48 +commons-io = 2.15.1 +commons-lang3 = 3.9 +Found 54 dependencies in 10 files. 
+randomizedtesting-runner -> randomizedtestingrunner +null +junit -> junit +4.13.2 +hamcrest -> hamcrest +2.1 +lucene-codecs -> lucene +null +lucene-core -> lucene +9.11.1 +lucene-test-framework -> lucene +9.11.1 +commons-logging -> commonslogging +null +commons-codec -> commonscodec +null +mockito-subclass -> mockito +null +mockito-core -> mockito +5.11.0 +byte-buddy -> bytebuddy +null +mocksocket -> mocksocket +1.2 +log4j-api -> log4j +null +jackson-annotations -> jackson +null +jackson-databind -> jackson +2.15.0 +jackson-core -> jackson +2.15.0 +asm-tree -> asm +9.7 +asm-analysis -> asm +9.7 +docker-java-transport-zerodep -> docker +null +docker-java-transport -> docker +3.3.4 +docker-java-core -> docker +3.3.4 +docker-java-api -> docker +3.3.4 +slf4j-simple -> slf4j +null +slf4j-api -> slf4j +2.0.6 +commons-compress -> commonscompress +null +duct-tape -> ducttape +null +hadoop-minicluster -> hadoopminicluster +null +hadoop-minicluster3 -> hadoopminicluster3 +null +json-schema-validator -> jsonschemavalidator +null +commons-io -> commonsio +null +commons-lang3 -> commonslang3 +null + + +versions: +asm = "9.7" +bytebuddy = "1.14.12" +commonscodec = "1.15" +commonscompress = "1.26.1" +commonsio = "2.15.1" +commonslang3 = "3.9" +commonslogging = "1.2" +docker = "3.3.4" +ducttape = "1.0.8" +hadoopminicluster = "2.8.5" +hadoopminicluster3 = "3.3.1" +jackson = "2.15.0" +jsonschemavalidator = "1.0.48" +log4j = "2.19.0" +lucene = "9.11.1" +mockito = "5.11.0" +objenesis = "3.3" +randomizedtestingrunner = "2.8.0" +slf4j = "2.0.6" +testcontainers = "1.19.2" + + +libraries: +asm = { group = "org.ow2.asm", name = "asm", version.ref = "asm" } +asm-analysis = { group = "org.ow2.asm", name = "asm-analysis", version.ref = "asm" } +asm-tree = { group = "org.ow2.asm", name = "asm-tree", version.ref = "asm" } +byte-buddy = { group = "net.bytebuddy", name = "byte-buddy", version.ref = "bytebuddy" } +commons-codec = { group = "commons-codec", name = "commons-codec", version.ref = "commonscodec" } +commons-compress = { group = "org.apache.commons", name = "commons-compress", version.ref = "commonscompress" } +commons-io = { group = "commons-io", name = "commons-io", version.ref = "commonsio" } +commons-lang3 = { group = "org.apache.commons", name = "commons-lang3", version.ref = "commonslang3" } +commons-logging = { group = "commons-logging", name = "commons-logging", version.ref = "commonslogging" } +docker-java-api = { group = "com.github.docker-java", name = "docker-java-api", version.ref = "docker" } +docker-java-core = { group = "com.github.docker-java", name = "docker-java-core", version.ref = "docker" } +docker-java-transport = { group = "com.github.docker-java", name = "docker-java-transport", version.ref = "docker" } +docker-java-transport-zerodep = { group = "com.github.docker-java", name = "docker-java-transport-zerodep", version.ref = "docker" } +duct-tape = { group = "org.rnorth.duct-tape", name = "duct-tape", version.ref = "ducttape" } +hadoop-minicluster = { group = "org.apache.hadoop", name = "hadoop-minicluster", version.ref = "hadoopminicluster" } +hadoop-minicluster3 = { group = "org.apache.hadoop", name = "hadoop-minicluster", version.ref = "hadoopminicluster3" } +hamcrest = { group = "org.hamcrest", name = "hamcrest", version.ref = "hamcrest" } +jackson-annotations = { group = "com.fasterxml.jackson.core", name = "jackson-annotations", version.ref = "jackson" } +jackson-core = { group = "com.fasterxml.jackson.core", name = "jackson-core", version.ref = "jackson" } +jackson-databind = { 
group = "com.fasterxml.jackson.core", name = "jackson-databind", version.ref = "jackson" } +json-schema-validator = { group = "com.networknt", name = "json-schema-validator", version.ref = "jsonschemavalidator" } +junit = { group = "junit", name = "junit", version.ref = "junit" } +log4j-api = { group = "org.apache.logging.log4j", name = "log4j-api", version.ref = "log4j" } +lucene-codecs = { group = "org.apache.lucene", name = "lucene-codecs", version.ref = "lucene" } +lucene-core = { group = "org.apache.lucene", name = "lucene-core", version.ref = "lucene" } +lucene-test-framework = { group = "org.apache.lucene", name = "lucene-test-framework", version.ref = "lucene" } +mockito-core = { group = "org.mockito", name = "mockito-core", version.ref = "mockito" } +mockito-subclass = { group = "org.mockito", name = "mockito-subclass", version.ref = "mockito" } +mocksocket = { group = "org.elasticsearch", name = "mocksocket", version.ref = "mocksocket" } +objenesis = { group = "org.objenesis", name = "objenesis", version.ref = "objenesis" } +randomizedtesting-runner = { group = "com.carrotsearch.randomizedtesting", name = "randomizedtesting-runner", version.ref = "randomizedtestingrunner" } +slf4j-api = { group = "org.slf4j", name = "slf4j-api", version.ref = "slf4j" } +slf4j-simple = { group = "org.slf4j", name = "slf4j-simple", version.ref = "slf4j" } +testcontainers = { group = "org.testcontainers", name = "testcontainers", version.ref = "testcontainers" } + + +Final versions +antlr4 = "4.13.1" +asm = "9.7" +aws = "1.12.270" +azure = "12.20.1" +azureCommon = "12.19.1" +azureCore = "1.34.0" +azureCoreHttpNetty = "1.12.7" +azureJackson = "2.15.4" +azureJacksonDatabind = "2.13.4.2" +bytebuddy = "1.14.12" +commonscodec = "1.15" +commonscompress = "1.26.1" +commonsio = "2.15.1" +commonslang3 = "3.9" +commonslogging = "1.2" +docker = "3.3.4" +ducttape = "1.0.8" +ecsLogging = "1.2.0" +google_oauth_client = "1.34.1" +hadoopminicluster = "2.8.5" +hadoopminicluster3 = "3.3.1" +hamcrest = "2.1" +httpcore = "4.4.13" +icu4j = "68.2" +jackson = "2.15.0" +jakartaActivation = "1.2.1" +jakartaXMLBind = "2.3.2" +jmh = "1.26" +jna = "5.12.1" +jsonschemavalidator = "1.0.48" +jts = "1.15.0" +junit = "4.13.2" +junit5 = "5.8.1" +log4j = "2.19.0" +log4japi = "2.19.0" +lucene = "9.11.1" +mockito = "5.11.0" +mocksocket = "1.2" +netty = "4.1.109.Final" +objenesis = "3.3" +opentelemetry = "1.31.0" +protobuf = "3.21.9" +randomizedtestingrunner = "2.8.0" +reactiveStreams = "1.0.4" +reactorCore = "3.4.34" +reactorNetty = "1.0.39" +slf4j = "2.0.6" +spatial4j = "0.7" +stax2API = "4.2.1" +testcontainers = "1.19.2" +woodstox = "6.4.0" + + +[libraries] +antlr4-runtime = { group = "org.antlr", name = "antlr4-runtime", version.ref = "antlr4" } +api-common = { group = "com.google.api", name = "api-common", version = "2.3.1" } +apm-agent = "co.elastic.apm:elastic-apm-agent:1.44.0" +asm = { group = "org.ow2.asm", name = "asm", version.ref = "asm" } +asm-analysis = { group = "org.ow2.asm", name = "asm-analysis", version.ref = "asm" } +asm-commons = { group = "org.ow2.asm", name = "asm-commons", version.ref = "asm" } +asm-tree = { group = "org.ow2.asm", name = "asm-tree", version.ref = "asm" } +asm-util = { group = "org.ow2.asm", name = "asm-util", version.ref = "asm" } +aws-jmespath-java = { group = "com.amazonaws", name = "jmespath-java", version.ref = "aws" } +aws-java-sdk-s3 = { group = "com.amazonaws", name = "aws-java-sdk-s3", version.ref = "aws" } +aws-java-sdk-core = { group = "com.amazonaws", name = "aws-java-sdk-core", 
version.ref = "aws" } +aws-java-sdk-sts = { group = "com.amazonaws", name = "aws-java-sdk-sts", version.ref = "aws" } +azure-core = { group = "com.azure", name = "azure-core", version.ref = "azureCore" } +azure-core-http-netty = { group = "com.azure", name = "azure-core-http-netty", version.ref = "azureCoreHttpNetty" } +azure-jackson-core = { group = "com.fasterxml.jackson.core", name = "jackson-core", version.ref = "azureJackson" } +azure-jackson-databind = { group = "com.fasterxml.jackson.core", name = "jackson-databind", version.ref = "azureJacksonDatabind" } +azure-jackson-Annotations = { group = "com.fasterxml.jackson.core", name = "jackson-annotations", version.ref = "azureJackson" } +azure-jackson-dataformat-xml = { group = "com.fasterxml.jackson.dataformat", name = "jackson-dataformat-xml", version.ref = "azureJackson" } +azure-jackson-datatype-jsr310 = { group = "com.fasterxml.jackson.datatype", name = "jackson-datatype-jsr310", version.ref = "azureJackson" } +azure-jackson-module-jaxb-annotations = { group = "com.fasterxml.jackson.module", name = "jackson-module-jaxb-annotations", version.ref = "azureJackson" } +azure-storage-blob = { group = "com.azure", name = "azure-storage-blob", version.ref = "azure" } +azure-storage-common = { group = "com.azure", name = "azure-storage-common", version.ref = "azureCommon" } +bc-fips = "org.bouncycastle:bc-fips:1.0.2.4" +bcpg-fips = "org.bouncycastle:bcpg-fips:1.0.7.1" +byte-buddy = { group = "net.bytebuddy", name = "byte-buddy", version.ref = "bytebuddy" } +commons-codec = { group = "commons-codec", name = "commons-codec", version.ref = "commonscodec" } +commons-compress = { group = "org.apache.commons", name = "commons-compress", version.ref = "commonscompress" } +commons-logging = { group = "commons-logging", name = "commons-logging", version.ref = "commonslogging" } +commons-math3 = "org.apache.commons:commons-math3:3.2" +commons-io = { group = "commons-io", name = "commons-io", version.ref = "commonsio" } +ecs-logging-core = { group = "co.elastic.logging", name = "ecs-logging-core", version.ref = "ecsLogging" } +gax = { group = "com.google.api", name = "gax", version = "2.20.1" } +gax-httpjson = { group = "com.google.api", name = "gax-httpjson", version = "0.105.1" } +geoip2 = "com.maxmind.geoip2:geoip2:4.2.0" +geolite2-datbase = "org.elasticsearch:geolite2-databases:20191119" +google-api-client = { group = "com.google.api-client", name = "google-api-client", version = "2.1.1" } +google-api-services-storage = { group = "com.google.apis", name = "google-api-services-storage", version = "v1-rev20220705-2.0.0" } +google-auth-library-credentials = { group = "com.google.auth", name = "google-auth-library-credentials", version = "1.11.0" } +google-auth-library-credentials-oauth2-http = { group = "com.google.auth", name = "google-auth-library-oauth2-http", version = "1.11.0" } +google-cloud-core = { group = "com.google.cloud", name = "google-cloud-core", version = "2.8.28" } +google-cloud-core-http = { group = "com.google.cloud", name = "google-cloud-core-http", version = "2.8.28" } +google-cloud-storage = { group = "com.google.cloud", name = "google-cloud-storage", version = "2.13.1" } +google-http-client = { group = "com.google.http-client", name = "google-http-client", version = "1.42.3" } +google-http-client-appengine = { group = "com.google.http-client", name = "google-http-client-appengine", version = "1.42.3" } +google-http-client-jackson2 = { group = "com.google.http-client", name = "google-http-client-jackson2", version = "1.42.3" } 
+google-http-client-json = { group = "com.google.http-client", name = "google-http-client-gson", version = "1.42.3" } +google-oauth-client = { group = "com.google.oauth-client", name = "google-oauth-client", version.ref = "google_oauth_client" } +grpc-context = { group = "io.grpc", name = "grpc-context", version = "1.49.2" } +gson = { group = "com.google.code.gson", name = "gson", version = "2.10" } +guava = { group = "com.google.guava", name = "guava", version = "32.0.1-jre" } +guava-failureaccess = { group = "com.google.guava", name = "failureaccess", version = "1.0.1" } +hamcrest = { group = "org.hamcrest", name = "hamcrest", version.ref = "hamcrest" } +hppc = "com.carrotsearch:hppc:0.8.1" +hdrhistogram = "org.hdrhistogram:HdrHistogram:2.1.9" +httpasyncclient = { group = "org.apache.httpcomponents", name = "httpasyncclient", version = "4.1.5" } +httpclient = { group = "org.apache.httpcomponents", name = "httpclient", version = "4.5.14" } +httpcore = { group = "org.apache.httpcomponents", name = "httpcore", version.ref = "httpcore" } +httpcore-nio = { group = "org.apache.httpcomponents", name = "httpcore-nio", version.ref = "httpcore" } +icu4j = { group = "com.ibm.icu", name = "icu4j", version.ref = "icu4j" } +jackson-core = { group = "com.fasterxml.jackson.core", name = "jackson-core", version.ref = "jackson" } +jackson-annotations = { group = "com.fasterxml.jackson.core", name = "jackson-annotations", version.ref = "jackson" } +jackson-databind = { group = "com.fasterxml.jackson.core", name = "jackson-databind", version.ref = "jackson" } +jackson-dataformat-smile = { group = "com.fasterxml.jackson.dataformat", name = "jackson-dataformat-smile", version.ref = "jackson" } +jackson-dataformat-yaml = { group = "com.fasterxml.jackson.dataformat", name = "jackson-dataformat-yaml", version.ref = "jackson" } +jackson-dataformat-cbor = { group = "com.fasterxml.jackson.dataformat", name = "jackson-dataformat-cbor", version.ref = "jackson" } +jakarta-activation-api = { group = "jakarta.activation", name = "jakarta.activation-api", version.ref = "jakartaActivation" } +jakarta-xml-bind-api = { group = "jakarta.xml.bind", name = "jakarta.xml.bind-api", version.ref = "jakartaXMLBind" } +jansi = "org.fusesource.jansi:jansi:2.4.0" +jaxb-api = "javax.xml.bind:jaxb-api:2.2.2" +jcodings = { group = "org.jruby.jcodings", name = "jcodings", version = "1.0.44" } +jimfs = { group = "com.google.jimfs", name = "jimfs", version = "1.3.0" } +jimfs-guava = { group = "com.google.guava", name = "guava", version = "32.1.1-jre" } +jmh-core = { group = "org.openjdk.jmh", name = "jmh-core", version.ref = "jmh" } +jmh-generator-annprocess = { group = "org.openjdk.jmh", name = "jmh-generator-annprocess", version.ref = "jmh" } +jna = { group = "net.java.dev.jna", name = "jna", version.ref = "jna" } +joda-time = "joda-time:joda-time:2.10.14" +joni = { group = "org.jruby.joni", name = "joni", version = "2.1.29" } +jopt-simple = "net.sf.jopt-simple:jopt-simple:5.0.2" +jsr305 = "com.google.code.findbugs:jsr305:3.0.2" +jts-core = { group = "org.locationtech.jts", name = "jts-core", version.ref = "jts" } +junit = { group = "junit", name = "junit", version.ref = "junit" } +junit5-jupiter-api = { group = "org.junit.jupiter", name = "junit-jupiter-api", version.ref = "junit5" } +log4j12-api = { group = "org.apache.logging.log4j", name = "log4j-1.2-api", version.ref = "log4j" } +log4j2-ecs-layout = { group = "co.elastic.logging", name = "log4j2-ecs-layout", version.ref = "ecsLogging" } +log4j-api = { group = 
"org.apache.logging.log4j", name = "log4j-api", version.ref = "log4j" } +log4j-core = { group = "org.apache.logging.log4j", name = "log4j-core", version.ref = "log4j" } +lucene-core = { group = "org.apache.lucene", name = "lucene-core", version.ref = "lucene" } +lucene-analysis-common = { group = "org.apache.lucene", name = "lucene-analysis-common", version.ref = "lucene" } +lucene-analysis-icu = { group = "org.apache.lucene", name = "lucene-analysis-icu", version.ref = "lucene" } +lucene-analysis-kuromoji = { group = "org.apache.lucene", name = "lucene-analysis-kuromoji", version.ref = "lucene" } +lucene-analysis-morfologik = { group = "org.apache.lucene", name = "lucene-analysis-morfologik", version.ref = "lucene" } +lucene-analysis-nori = { group = "org.apache.lucene", name = "lucene-analysis-nori", version.ref = "lucene" } +lucene-analysis-phonetic = { group = "org.apache.lucene", name = "lucene-analysis-phonetic", version.ref = "lucene" } +lucene-analysis-smartcn = { group = "org.apache.lucene", name = "lucene-analysis-smartcn", version.ref = "lucene" } +lucene-analysis-stempel = { group = "org.apache.lucene", name = "lucene-analysis-stempel", version.ref = "lucene" } +lucene-backward-codecs = { group = "org.apache.lucene", name = "lucene-backward-codecs", version.ref = "lucene" } +lucene-codecs = { group = "org.apache.lucene", name = "lucene-codecs", version.ref = "lucene" } +lucene-expressions = { group = "org.apache.lucene", name = "lucene-expressions", version.ref = "lucene" } +lucene-highlighter = { group = "org.apache.lucene", name = "lucene-highlighter", version.ref = "lucene" } +lucene-grouping = { group = "org.apache.lucene", name = "lucene-grouping", version.ref = "lucene" } +lucene-join = { group = "org.apache.lucene", name = "lucene-join", version.ref = "lucene" } +lucene-memory = { group = "org.apache.lucene", name = "lucene-memory", version.ref = "lucene" } +lucene-misc = { group = "org.apache.lucene", name = "lucene-misc", version.ref = "lucene" } +lucene-queries = { group = "org.apache.lucene", name = "lucene-queries", version.ref = "lucene" } +lucene-queryparser = { group = "org.apache.lucene", name = "lucene-queryparser", version.ref = "lucene" } +lucene-sandbox = { group = "org.apache.lucene", name = "lucene-sandbox", version.ref = "lucene" } +lucene-suggest = { group = "org.apache.lucene", name = "lucene-suggest", version.ref = "lucene" } +lucene-spatial3d = { group = "org.apache.lucene", name = "lucene-spatial3d", version.ref = "lucene" } +lucene-spatial-extras = { group = "org.apache.lucene", name = "lucene-spatial-extras", version.ref = "lucene" } +lz4-java = { group = "org.lz4", name = "lz4-java", version = "1.8.0" } +maxmind-db = "com.maxmind.db:maxmind-db:3.1.0" +mockito-core = { group = "org.mockito", name = "mockito-core", version.ref = "mockito" } +mockito-subclass = { group = "org.mockito", name = "mockito-subclass", version.ref = "mockito" } +mocksocket = { group = "org.elasticsearch", name = "mocksocket", version.ref = "mocksocket" } +morfologik-stemming = "org.carrot2:morfologik-stemming:2.1.1" +morfologik-fsa = "org.carrot2:morfologik-fsa:2.1.1" +morfologik-ukrainian-search = "ua.net.nlp:morfologik-ukrainian-search:3.7.5" +mustache-compiler = "com.github.spullara.mustache.java:compiler:0.9.10" +netty-buffer = { group = "io.netty", name = "netty-buffer", version.ref = "netty" } +netty-codec = { group = "io.netty", name = "netty-codec", version.ref = "netty" } +netty-codec-dns = { group = "io.netty", name = "netty-codec-dns", version.ref = "netty" } 
+netty-codec-http = { group = "io.netty", name = "netty-codec-http", version.ref = "netty" } +netty-codec-http2 = { group = "io.netty", name = "netty-codec-http2", version.ref = "netty" } +netty-codec-socks = { group = "io.netty", name = "netty-codec-socks", version.ref = "netty" } +netty-common = { group = "io.netty", name = "netty-common", version.ref = "netty" } +netty-handler = { group = "io.netty", name = "netty-handler", version.ref = "netty" } +netty-handler-proxy = { group = "io.netty", name = "netty-handler-proxy", version.ref = "netty" } +netty-resolver = { group = "io.netty", name = "netty-resolver", version.ref = "netty" } +netty-resolver-dns = { group = "io.netty", name = "netty-resolver-dns", version.ref = "netty" } +netty-transport = { group = "io.netty", name = "netty-transport", version.ref = "netty" } +netty-transport-native-unix-common = { group = "io.netty", name = "netty-transport-native-unix-common", version.ref = "netty" } +objenesis = { group = "org.objenesis", name = "objenesis", version.ref = "objenesis" } +opencensus-api = { group = "io.opencensus", name = "opencensus-api", version = "0.31.1" } +opencensus-contrib-http-util = { group = "io.opencensus", name = "opencensus-contrib-http-util", version = "0.31.1" } +opentelemetry-api = { group = "io.opentelemetry", name = "opentelemetry-api", version.ref = "opentelemetry" } +opentelemetry-context = { group = "io.opentelemetry", name = "opentelemetry-context", version.ref = "opentelemetry" } +opentelemetry-semconv = { group = "io.opentelemetry", name = "opentelemetry-semconv", version = "1.21.0-alpha" } +proto-google-common-protos = { group = "com.google.api.grpc", name = "proto-google-common-protos", version = "2.9.6" } +proto-google-iam-v1 = { group = "com.google.api.grpc", name = "proto-google-iam-v1", version = "1.6.2" } +protobuf-java = { group = "com.google.protobuf", name = "protobuf-java", version.ref = "protobuf" } +protobuf-java-util = { group = "com.google.protobuf", name = "protobuf-java-util", version.ref = "protobuf" } +randomizedtesting-runner = { group = "com.carrotsearch.randomizedtesting", name = "randomizedtesting-runner", version.ref = "randomizedtestingrunner" } +reactor-netty-core = { group = "io.projectreactor.netty", name = "reactor-netty-core", version.ref = "reactorNetty" } +reactor-netty-http = { group = "io.projectreactor.netty", name = "reactor-netty-http", version.ref = "reactorNetty" } +reactor-core = { group = "io.projectreactor", name = "reactor-core", version.ref = "reactorCore" } +reactive-streams = { group = "org.reactivestreams", name = "reactive-streams", version.ref = "reactiveStreams" } +s2-geometry-library-java = { group = "io.sgr", name = "s2-geometry-library-java", version = "1.0.1" } +slf4j-api = { group = "org.slf4j", name = "slf4j-api", version.ref = "slf4j" } +slf4j-nop = { group = "org.slf4j", name = "slf4j-nop", version.ref = "slf4j" } +slf4j-simple = { group = "org.slf4j", name = "slf4j-simple", version.ref = "slf4j" } +snakeyaml = "org.yaml:snakeyaml:2.0" +spatial4j = { group = "org.locationtech.spatial4j", name = "spatial4j", version.ref = "spatial4j" } +stax2-api = { group = "org.codehaus.woodstox", name = "stax2-api", version.ref = "stax2API" } +threetenbp = { group = "org.threeten", name = "threetenbp", version = "1.6.5" } +woodstox-core = { group = "com.fasterxml.woodstox", name = "woodstox-core", version.ref = "woodstox" } From bdb1028cd06a482e353dae08f0d5db8a7235f190 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 19 Jul 2024 06:50:04 -0700 Subject: [PATCH 
088/406] Skip preallocate tests on windows (#110998) (#111100) The preallocate tests assumed that preallocation was using the fallback implementation which calls setLength on Windows. However, that fallback only happens inside the SharedBytes class, so windows doesn't actually do anything when tryPreallocate is called. This commit skips the test on windows. closes #110948 --- .../java/org/elasticsearch/preallocate/PreallocateTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/libs/preallocate/src/test/java/org/elasticsearch/preallocate/PreallocateTests.java b/libs/preallocate/src/test/java/org/elasticsearch/preallocate/PreallocateTests.java index e65327f9cd1d2..5c3f1d77f59da 100644 --- a/libs/preallocate/src/test/java/org/elasticsearch/preallocate/PreallocateTests.java +++ b/libs/preallocate/src/test/java/org/elasticsearch/preallocate/PreallocateTests.java @@ -18,13 +18,12 @@ public class PreallocateTests extends ESTestCase { public void testPreallocate() throws Exception { + assumeFalse("no preallocate on windows", System.getProperty("os.name").startsWith("Windows")); Path cacheFile = createTempFile(); long size = 1024 * 1024; // 1 MB Preallocate.preallocate(cacheFile, size); OptionalLong foundSize = FileSystemNatives.allocatedSizeInBytes(cacheFile); assertTrue(foundSize.isPresent()); - // Note that on Windows the fallback setLength is used. Although that creates a sparse - // file on Linux/MacOS, it full allocates the file on Windows assertThat(foundSize.getAsLong(), equalTo(size)); } } From aecc6e2cd8de16666ed225cdc258cbe4aae11fc1 Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Fri, 19 Jul 2024 16:26:13 +0200 Subject: [PATCH 089/406] [8.15] Rename `logs` index mode to `logsdb` (#111054) (#111098) --- docs/reference/data-streams/logs.asciidoc | 6 +- .../datastreams/LogsDataStreamIT.java | 12 +- .../datastreams/LogsDataStreamRestIT.java | 10 +- .../LogsIndexModeDisabledRestTestIT.java | 2 +- .../LogsIndexModeEnabledRestTestIT.java | 2 +- .../rest-api-spec/test/logsdb/10_settings.yml | 158 +++++++++--------- .../org/elasticsearch/index/IndexMode.java | 4 +- .../elasticsearch/index/IndexSortConfig.java | 8 +- .../index/codec/PerFieldFormatSupplier.java | 2 +- .../index/mapper/SourceFieldMapper.java | 10 +- .../indices/CreateIndexCapabilities.java | 4 +- .../index/LogsIndexModeTests.java | 20 +-- .../index/codec/PerFieldMapperCodecTests.java | 2 +- .../index/mapper/MapperServiceTestCase.java | 3 +- .../index/engine/FollowingEngineTests.java | 4 +- .../src/main/resources/logs@settings.json | 2 +- .../stack/LegacyStackTemplateRegistry.java | 2 +- .../xpack/stack/StackTemplateRegistry.java | 7 +- 18 files changed, 130 insertions(+), 128 deletions(-) diff --git a/docs/reference/data-streams/logs.asciidoc b/docs/reference/data-streams/logs.asciidoc index a2d8b6776e052..e870289bcf7be 100644 --- a/docs/reference/data-streams/logs.asciidoc +++ b/docs/reference/data-streams/logs.asciidoc @@ -1,7 +1,7 @@ [[logs-data-stream]] == Logs data stream -preview::[Logs data streams and the logs index mode are in tech preview and may be changed or removed in the future. Don't use logs data streams or logs index mode in production.] +preview::[Logs data streams and the logsdb index mode are in tech preview and may be changed or removed in the future. Don't use logs data streams or logsdb index mode in production.] A logs data stream is a data stream type that stores log data more efficiently. 
@@ -20,7 +20,7 @@ The following features are enabled in a logs data stream: [[how-to-use-logsds]] === Create a logs data stream -To create a logs data stream, set your index template `index.mode` to `logs`: +To create a logs data stream, set your index template `index.mode` to `logsdb`: [source,console] ---- @@ -30,7 +30,7 @@ PUT _index_template/my-index-template "data_stream": { }, "template": { "settings": { - "index.mode": "logs" <1> + "index.mode": "logsdb" <1> } }, "priority": 101 <2> diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java index f95d9a0b0431f..52ce2a7a33ea6 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java @@ -165,7 +165,7 @@ public void testLogsIndexModeDataStreamIndexing() throws IOException, ExecutionE client(), "logs-composable-template", LOGS_OR_STANDARD_MAPPING, - Map.of("index.mode", "logs"), + Map.of("index.mode", "logsdb"), List.of("logs-*-*") ); final String dataStreamName = generateDataStreamName("logs"); @@ -188,7 +188,7 @@ public void testIndexModeLogsAndStandardSwitching() throws IOException, Executio ); createDataStream(client(), dataStreamName); for (int i = 0; i < randomIntBetween(5, 10); i++) { - final IndexMode indexMode = i % 2 == 0 ? IndexMode.LOGS : IndexMode.STANDARD; + final IndexMode indexMode = i % 2 == 0 ? IndexMode.LOGSDB : IndexMode.STANDARD; indexModes.add(indexMode); updateComposableIndexTemplate( client(), @@ -206,7 +206,7 @@ public void testIndexModeLogsAndStandardSwitching() throws IOException, Executio public void testIndexModeLogsAndTimeSeriesSwitching() throws IOException, ExecutionException, InterruptedException { final String dataStreamName = generateDataStreamName("custom"); final List indexPatterns = List.of("custom-*-*"); - final Map logsSettings = Map.of("index.mode", "logs"); + final Map logsSettings = Map.of("index.mode", "logsdb"); final Map timeSeriesSettings = Map.of("index.mode", "time_series", "index.routing_path", "host.name"); putComposableIndexTemplate(client(), "custom-composable-template", LOGS_OR_STANDARD_MAPPING, logsSettings, indexPatterns); @@ -221,13 +221,13 @@ public void testIndexModeLogsAndTimeSeriesSwitching() throws IOException, Execut rolloverDataStream(dataStreamName); indexLogOrStandardDocuments(client(), randomIntBetween(10, 20), randomIntBetween(32, 64), dataStreamName); - assertDataStreamBackingIndicesModes(dataStreamName, List.of(IndexMode.LOGS, IndexMode.TIME_SERIES, IndexMode.LOGS)); + assertDataStreamBackingIndicesModes(dataStreamName, List.of(IndexMode.LOGSDB, IndexMode.TIME_SERIES, IndexMode.LOGSDB)); } public void testInvalidIndexModeTimeSeriesSwitchWithoutRoutingPath() throws IOException, ExecutionException, InterruptedException { final String dataStreamName = generateDataStreamName("custom"); final List indexPatterns = List.of("custom-*-*"); - final Map logsSettings = Map.of("index.mode", "logs"); + final Map logsSettings = Map.of("index.mode", "logsdb"); final Map timeSeriesSettings = Map.of("index.mode", "time_series"); putComposableIndexTemplate(client(), "custom-composable-template", LOGS_OR_STANDARD_MAPPING, logsSettings, indexPatterns); @@ -249,7 +249,7 @@ public void testInvalidIndexModeTimeSeriesSwitchWithoutRoutingPath() throws IOEx public void 
testInvalidIndexModeTimeSeriesSwitchWithoutDimensions() throws IOException, ExecutionException, InterruptedException { final String dataStreamName = generateDataStreamName("custom"); final List indexPatterns = List.of("custom-*-*"); - final Map logsSettings = Map.of("index.mode", "logs"); + final Map logsSettings = Map.of("index.mode", "logsdb"); final Map timeSeriesSettings = Map.of("index.mode", "time_series", "index.routing_path", "host.name"); putComposableIndexTemplate(client(), "custom-composable-template", LOGS_OR_STANDARD_MAPPING, logsSettings, indexPatterns); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamRestIT.java index d3ec5b29ff5b9..780864db8b629 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamRestIT.java @@ -72,7 +72,7 @@ private static void waitForLogs(RestClient client) throws Exception { "template": { "settings": { "index": { - "mode": "logs" + "mode": "logsdb" } }, "mappings": { @@ -161,7 +161,7 @@ public void testLogsIndexing() throws IOException { randomIp(randomBoolean()) ) ); - assertDataStreamBackingIndexMode("logs", 0); + assertDataStreamBackingIndexMode("logsdb", 0); rolloverDataStream(client, DATA_STREAM_NAME); indexDocument( client, @@ -175,7 +175,7 @@ public void testLogsIndexing() throws IOException { randomIp(randomBoolean()) ) ); - assertDataStreamBackingIndexMode("logs", 1); + assertDataStreamBackingIndexMode("logsdb", 1); } public void testLogsStandardIndexModeSwitch() throws IOException { @@ -193,7 +193,7 @@ public void testLogsStandardIndexModeSwitch() throws IOException { randomIp(randomBoolean()) ) ); - assertDataStreamBackingIndexMode("logs", 0); + assertDataStreamBackingIndexMode("logsdb", 0); putTemplate(client, "custom-template", STANDARD_TEMPLATE); rolloverDataStream(client, DATA_STREAM_NAME); @@ -225,7 +225,7 @@ public void testLogsStandardIndexModeSwitch() throws IOException { randomIp(randomBoolean()) ) ); - assertDataStreamBackingIndexMode("logs", 2); + assertDataStreamBackingIndexMode("logsdb", 2); } private void assertDataStreamBackingIndexMode(final String indexMode, int backingIndex) throws IOException { diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeDisabledRestTestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeDisabledRestTestIT.java index dcd2457b88f18..fada21224e3b2 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeDisabledRestTestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeDisabledRestTestIT.java @@ -50,7 +50,7 @@ public void setup() throws Exception { public void testLogsSettingsIndexModeDisabled() throws IOException { assertOK(createDataStream(client, "logs-custom-dev")); final String indexMode = (String) getSetting(client, getDataStreamBackingIndex(client, "logs-custom-dev", 0), "index.mode"); - assertThat(indexMode, Matchers.not(equalTo(IndexMode.LOGS.getName()))); + assertThat(indexMode, Matchers.not(equalTo(IndexMode.LOGSDB.getName()))); } } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeEnabledRestTestIT.java 
b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeEnabledRestTestIT.java index 832267cebf97c..a4277748ea9bd 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeEnabledRestTestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeEnabledRestTestIT.java @@ -179,7 +179,7 @@ public void testCreateDataStream() throws IOException { assertOK(putComponentTemplate(client, "logs@custom", MAPPINGS)); assertOK(createDataStream(client, "logs-custom-dev")); final String indexMode = (String) getSetting(client, getDataStreamBackingIndex(client, "logs-custom-dev", 0), "index.mode"); - assertThat(indexMode, equalTo(IndexMode.LOGS.getName())); + assertThat(indexMode, equalTo(IndexMode.LOGSDB.getName())); } public void testBulkIndexing() throws IOException { diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml index de751e7c5f4df..07cb154449a70 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml @@ -5,8 +5,8 @@ setup: capabilities: - method: PUT path: /{index} - capabilities: [logs_index_mode] - reason: "Support for 'logs' index mode capability required" + capabilities: [logsdb_index_mode] + reason: "Support for 'logsdb' index mode capability required" --- create logs index: @@ -15,8 +15,8 @@ create logs index: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: indices.create: @@ -24,7 +24,7 @@ create logs index: body: settings: index: - mode: logs + mode: logsdb number_of_replicas: 0 number_of_shards: 2 mappings: @@ -75,7 +75,7 @@ create logs index: index: test - is_true: test - - match: { test.settings.index.mode: "logs" } + - match: { test.settings.index.mode: "logsdb" } - do: indices.get_mapping: @@ -89,8 +89,8 @@ using default timestamp field mapping: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: indices.create: @@ -98,7 +98,7 @@ using default timestamp field mapping: body: settings: index: - mode: logs + mode: logsdb number_of_replicas: 0 number_of_shards: 2 mappings: @@ -116,42 +116,42 @@ using default timestamp field mapping: --- missing hostname field: - - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" - - - do: - indices.create: - index: test-hostname-missing - body: - settings: - index: - mode: logs - number_of_replicas: 0 - number_of_shards: 2 - mappings: - properties: - "@timestamp": - type: date - agent_id: - type: keyword - process_id: - type: integer - http_method: - type: keyword - message: - type: text - - - do: - indices.get_settings: - index: test-hostname-missing - - - is_true: test-hostname-missing - - match: { test-hostname-missing.settings.index.mode: "logs" } + - requires: + test_runner_features: [ capabilities ] + capabilities: + - 
method: PUT + path: /{index} + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" + + - do: + indices.create: + index: test-hostname-missing + body: + settings: + index: + mode: logsdb + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + agent_id: + type: keyword + process_id: + type: integer + http_method: + type: keyword + message: + type: text + + - do: + indices.get_settings: + index: test-hostname-missing + + - is_true: test-hostname-missing + - match: { test-hostname-missing.settings.index.mode: "logsdb" } --- missing sort field: @@ -160,8 +160,8 @@ missing sort field: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: catch: bad_request @@ -170,7 +170,7 @@ missing sort field: body: settings: index: - mode: logs + mode: logsdb number_of_replicas: 0 number_of_shards: 2 sort: @@ -201,8 +201,8 @@ non-default sort settings: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: indices.create: @@ -211,7 +211,7 @@ non-default sort settings: settings: index: - mode: logs + mode: logsdb number_of_shards: 2 number_of_replicas: 0 sort: @@ -237,7 +237,7 @@ non-default sort settings: index: test-sort - is_true: test-sort - - match: { test-sort.settings.index.mode: "logs" } + - match: { test-sort.settings.index.mode: "logsdb" } - match: { test-sort.settings.index.sort.field.0: "agent_id" } - match: { test-sort.settings.index.sort.field.1: "@timestamp" } - match: { test-sort.settings.index.sort.order.0: "asc" } @@ -254,8 +254,8 @@ override sort order settings: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: indices.create: @@ -264,7 +264,7 @@ override sort order settings: settings: index: - mode: logs + mode: logsdb number_of_shards: 2 number_of_replicas: 0 sort: @@ -289,7 +289,7 @@ override sort order settings: index: test-sort-order - is_true: test-sort-order - - match: { test-sort-order.settings.index.mode: "logs" } + - match: { test-sort-order.settings.index.mode: "logsdb" } - match: { test-sort-order.settings.index.sort.field.0: null } - match: { test-sort-order.settings.index.sort.field.1: null } - match: { test-sort-order.settings.index.sort.order.0: "asc" } @@ -302,8 +302,8 @@ override sort missing settings: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: indices.create: @@ -312,7 +312,7 @@ override sort missing settings: settings: index: - mode: logs + mode: logsdb number_of_shards: 2 number_of_replicas: 0 sort: @@ -337,7 +337,7 @@ override sort missing settings: index: test-sort-missing - is_true: test-sort-missing - - match: { test-sort-missing.settings.index.mode: "logs" } + - match: { test-sort-missing.settings.index.mode: "logsdb" } - match: { test-sort-missing.settings.index.sort.field.0: null } - match: { 
test-sort-missing.settings.index.sort.field.1: null } - match: { test-sort-missing.settings.index.sort.missing.0: "_last" } @@ -350,8 +350,8 @@ override sort mode settings: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: indices.create: @@ -360,7 +360,7 @@ override sort mode settings: settings: index: - mode: logs + mode: logsdb number_of_shards: 2 number_of_replicas: 0 sort: @@ -385,7 +385,7 @@ override sort mode settings: index: test-sort-mode - is_true: test-sort-mode - - match: { test-sort-mode.settings.index.mode: "logs" } + - match: { test-sort-mode.settings.index.mode: "logsdb" } - match: { test-sort-mode.settings.index.sort.field.0: null } - match: { test-sort-mode.settings.index.sort.field.1: null } - match: { test-sort-mode.settings.index.sort.mode.0: "max" } @@ -399,8 +399,8 @@ override sort field using nested field type in sorting: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: catch: bad_request @@ -409,7 +409,7 @@ override sort field using nested field type in sorting: body: settings: index: - mode: logs + mode: logsdb number_of_replicas: 0 number_of_shards: 2 sort: @@ -446,8 +446,8 @@ override sort field using nested field type: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: indices.create: @@ -455,7 +455,7 @@ override sort field using nested field type: body: settings: index: - mode: logs + mode: logsdb number_of_replicas: 0 number_of_shards: 2 mappings: @@ -486,8 +486,8 @@ routing path not allowed in logs mode: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: catch: bad_request @@ -496,7 +496,7 @@ routing path not allowed in logs mode: body: settings: index: - mode: logs + mode: logsdb number_of_replicas: 0 number_of_shards: 2 routing_path: [ "host.name", "agent_id" ] @@ -526,8 +526,8 @@ start time not allowed in logs mode: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: catch: bad_request @@ -536,7 +536,7 @@ start time not allowed in logs mode: body: settings: index: - mode: logs + mode: logsdb number_of_replicas: 0 number_of_shards: 2 time_series: @@ -567,8 +567,8 @@ end time not allowed in logs mode: capabilities: - method: PUT path: /{index} - capabilities: [ logs_index_mode ] - reason: "Support for 'logs' index mode capability required" + capabilities: [ logsdb_index_mode ] + reason: "Support for 'logsdb' index mode capability required" - do: catch: bad_request @@ -577,7 +577,7 @@ end time not allowed in logs mode: body: settings: index: - mode: logs + mode: logsdb number_of_replicas: 0 number_of_shards: 2 time_series: diff --git 
a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 1050455392482..bc8bf8103fa08 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -223,7 +223,7 @@ public boolean isSyntheticSourceEnabled() { return true; } }, - LOGS("logs") { + LOGSDB("logsdb") { @Override void validateWithOtherSettings(Map, Object> settings) { IndexMode.validateTimeSeriesSettings(settings); @@ -469,7 +469,7 @@ public static IndexMode fromString(String value) { return switch (value) { case "standard" -> IndexMode.STANDARD; case "time_series" -> IndexMode.TIME_SERIES; - case "logs" -> IndexMode.LOGS; + case "logsdb" -> IndexMode.LOGSDB; default -> throw new IllegalArgumentException( "[" + value diff --git a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java index f190462d6d1e9..a11a51ef7ad62 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java @@ -153,14 +153,14 @@ public IndexSortConfig(IndexSettings indexSettings) { } List fields = INDEX_SORT_FIELD_SETTING.get(settings); - if (this.indexMode == IndexMode.LOGS && fields.isEmpty()) { + if (this.indexMode == IndexMode.LOGSDB && fields.isEmpty()) { fields = List.of("host.name", DataStream.TIMESTAMP_FIELD_NAME); } this.sortSpecs = fields.stream().map(FieldSortSpec::new).toArray(FieldSortSpec[]::new); if (INDEX_SORT_ORDER_SETTING.exists(settings)) { List orders = INDEX_SORT_ORDER_SETTING.get(settings); - if (this.indexMode == IndexMode.LOGS && orders.isEmpty()) { + if (this.indexMode == IndexMode.LOGSDB && orders.isEmpty()) { orders = List.of(SortOrder.DESC, SortOrder.DESC); } if (orders.size() != sortSpecs.length) { @@ -175,7 +175,7 @@ public IndexSortConfig(IndexSettings indexSettings) { if (INDEX_SORT_MODE_SETTING.exists(settings)) { List modes = INDEX_SORT_MODE_SETTING.get(settings); - if (this.indexMode == IndexMode.LOGS && modes.isEmpty()) { + if (this.indexMode == IndexMode.LOGSDB && modes.isEmpty()) { modes = List.of(MultiValueMode.MIN, MultiValueMode.MIN); } if (modes.size() != sortSpecs.length) { @@ -188,7 +188,7 @@ public IndexSortConfig(IndexSettings indexSettings) { if (INDEX_SORT_MISSING_SETTING.exists(settings)) { List missingValues = INDEX_SORT_MISSING_SETTING.get(settings); - if (this.indexMode == IndexMode.LOGS && missingValues.isEmpty()) { + if (this.indexMode == IndexMode.LOGSDB && missingValues.isEmpty()) { missingValues = List.of("_first", "_first"); } if (missingValues.size() != sortSpecs.length) { diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java index 0b4bb9dfc10ae..1228c908f7c18 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java @@ -123,7 +123,7 @@ private boolean isTimeSeriesModeIndex() { } private boolean isLogsModeIndex() { - return mapperService != null && IndexMode.LOGS == mapperService.getIndexSettings().getMode(); + return mapperService != null && IndexMode.LOGSDB == mapperService.getIndexSettings().getMode(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 67e457907f8cc..908108bce31da 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -69,12 +69,12 @@ private enum Mode { IndexMode.TIME_SERIES ); - private static final SourceFieldMapper LOGS_DEFAULT = new SourceFieldMapper( + private static final SourceFieldMapper LOGSDB_DEFAULT = new SourceFieldMapper( Mode.SYNTHETIC, Explicit.IMPLICIT_TRUE, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY, - IndexMode.LOGS + IndexMode.LOGSDB ); /* @@ -184,7 +184,7 @@ public SourceFieldMapper build() { if (isDefault()) { return switch (indexMode) { case TIME_SERIES -> TSDB_DEFAULT; - case LOGS -> LOGS_DEFAULT; + case LOGSDB -> LOGSDB_DEFAULT; default -> DEFAULT; }; } @@ -234,8 +234,8 @@ public SourceFieldMapper build() { } else { return TSDB_LEGACY_DEFAULT; } - } else if (indexMode == IndexMode.LOGS) { - return LOGS_DEFAULT; + } else if (indexMode == IndexMode.LOGSDB) { + return LOGSDB_DEFAULT; } } return DEFAULT; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java index 700baac09865e..218348325e0a4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java @@ -18,7 +18,7 @@ public class CreateIndexCapabilities { /** * Support for using the 'logs' index mode. */ - private static final String LOGS_INDEX_MODE_CAPABILITY = "logs_index_mode"; + private static final String LOGSDB_INDEX_MODE_CAPABILITY = "logsdb_index_mode"; - public static Set CAPABILITIES = Set.of(LOGS_INDEX_MODE_CAPABILITY); + public static Set CAPABILITIES = Set.of(LOGSDB_INDEX_MODE_CAPABILITY); } diff --git a/server/src/test/java/org/elasticsearch/index/LogsIndexModeTests.java b/server/src/test/java/org/elasticsearch/index/LogsIndexModeTests.java index caddc7d5ea5af..84a9682635c8c 100644 --- a/server/src/test/java/org/elasticsearch/index/LogsIndexModeTests.java +++ b/server/src/test/java/org/elasticsearch/index/LogsIndexModeTests.java @@ -16,7 +16,7 @@ public class LogsIndexModeTests extends ESTestCase { public void testLogsIndexModeSetting() { - assertThat(IndexSettings.MODE.get(buildSettings()), equalTo(IndexMode.LOGS)); + assertThat(IndexSettings.MODE.get(buildSettings()), equalTo(IndexMode.LOGSDB)); } public void testSortField() { @@ -25,9 +25,9 @@ public void testSortField() { .put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "agent_id") .build(); final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", sortSettings); - assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGS)); + assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB)); final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - assertThat(settings.getMode(), equalTo(IndexMode.LOGS)); + assertThat(settings.getMode(), equalTo(IndexMode.LOGSDB)); assertThat("agent_id", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey()))); } @@ -38,9 +38,9 @@ public void testSortMode() { .put(IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(), "max") .build(); final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", sortSettings); - assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGS)); + 
assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB)); final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - assertThat(settings.getMode(), equalTo(IndexMode.LOGS)); + assertThat(settings.getMode(), equalTo(IndexMode.LOGSDB)); assertThat("agent_id", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey()))); assertThat("max", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey()))); } @@ -52,9 +52,9 @@ public void testSortOrder() { .put(IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), "desc") .build(); final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", sortSettings); - assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGS)); + assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB)); final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - assertThat(settings.getMode(), equalTo(IndexMode.LOGS)); + assertThat(settings.getMode(), equalTo(IndexMode.LOGSDB)); assertThat("agent_id", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey()))); assertThat("desc", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey()))); } @@ -66,15 +66,15 @@ public void testSortMissing() { .put(IndexSortConfig.INDEX_SORT_MISSING_SETTING.getKey(), "_last") .build(); final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", sortSettings); - assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGS)); + assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB)); final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); - assertThat(settings.getMode(), equalTo(IndexMode.LOGS)); + assertThat(settings.getMode(), equalTo(IndexMode.LOGSDB)); assertThat("agent_id", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey()))); assertThat("_last", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_MISSING_SETTING.getKey()))); } private Settings buildSettings() { - return Settings.builder().put(IndexSettings.MODE.getKey(), IndexMode.LOGS.getName()).build(); + return Settings.builder().put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName()).build(); } private String getIndexSetting(final IndexSettings settings, final String name) { diff --git a/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java index 525fa31673494..122e238fb6346 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java @@ -188,7 +188,7 @@ public void testUseTimeSeriesModeAndCodecEnabled() throws IOException { } public void testLogsIndexMode() throws IOException { - PerFieldFormatSupplier perFieldMapperCodec = createFormatSupplier(true, IndexMode.LOGS, MAPPING_3); + PerFieldFormatSupplier perFieldMapperCodec = createFormatSupplier(true, IndexMode.LOGSDB, MAPPING_3); assertThat((perFieldMapperCodec.useTSDBDocValuesFormat("@timestamp")), is(true)); assertThat((perFieldMapperCodec.useTSDBDocValuesFormat("hostname")), is(true)); assertThat((perFieldMapperCodec.useTSDBDocValuesFormat("response_size")), is(true)); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index a6b737f162547..fa48309821b16 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -36,6 +36,7 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AnalyzerScope; @@ -151,7 +152,7 @@ protected final DocumentMapper createTimeSeriesModeDocumentMapper(XContentBuilde } protected final DocumentMapper createLogsModeDocumentMapper(XContentBuilder mappings) throws IOException { - Settings settings = Settings.builder().put(IndexSettings.MODE.getKey(), "logs").build(); + Settings settings = Settings.builder().put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName()).build(); return createMapperService(settings, mappings).documentMapper(); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index fbddfc7683d2f..478a0d08d6612 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -697,8 +697,8 @@ public void testProcessOnceOnPrimary() throws Exception { case TIME_SERIES: settingsBuilder.put("index.mode", "time_series").put("index.routing_path", "foo"); break; - case LOGS: - settingsBuilder.put("index.mode", "logs"); + case LOGSDB: + settingsBuilder.put("index.mode", IndexMode.LOGSDB.getName()); break; default: throw new UnsupportedOperationException("Unknown index mode [" + indexMode + "]"); diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json index 240abf9934db5..e9a9f2611ad7b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json @@ -5,7 +5,7 @@ "lifecycle": { "name": "logs" }, - "mode": "${xpack.stack.template.logs.index.mode}", + "mode": "${xpack.stack.template.logsdb.index.mode}", "codec": "best_compression", "mapping": { "ignore_malformed": true, diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java index 4d2789dbb8591..62d22c0c0a9cc 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java @@ -54,7 +54,7 @@ public class LegacyStackTemplateRegistry extends IndexTemplateRegistry { private static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of( "xpack.stack.template.deprecated", "true", - "xpack.stack.template.logs.index.mode", + "xpack.stack.template.logsdb.index.mode", "standard" ); diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java index 7dc1dfb6cf3df..6a9936f4f27d3 100644 --- 
a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.features.FeatureService; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -58,7 +59,7 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { ); /** - * if index.mode "logs" is applied by default in logs@settings for 'logs-*-*' + * if index.mode "logsdb" is applied by default in logs@settings for 'logs-*-*' */ public static final Setting CLUSTER_LOGSDB_ENABLED = Setting.boolSetting( "cluster.logsdb.enabled", @@ -166,8 +167,8 @@ private Map loadComponentTemplateConfigs(boolean logs Map.of( "xpack.stack.template.deprecated", "false", - "xpack.stack.template.logs.index.mode", - logsDbEnabled ? "logs" : "standard" + "xpack.stack.template.logsdb.index.mode", + logsDbEnabled ? IndexMode.LOGSDB.getName() : IndexMode.STANDARD.getName() ) ), new IndexTemplateConfig( From 6318929810ce208ae2982f89dddcefe6a83de387 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 19 Jul 2024 20:10:05 +0200 Subject: [PATCH 090/406] Cleanup repository after adding accidental files to PR (#111108) (#111110) With https://github.com/elastic/elasticsearch/pull/110109 i accidentally added local files to the repo. This removes them again. --- convert-deps.groovy | 24 - generate-version-catalog.groovy | 319 ---- jdks.log | 3146 ------------------------------- list-plain-deps.groovy | 68 - versions.log | 595 ------ 5 files changed, 4152 deletions(-) delete mode 100644 convert-deps.groovy delete mode 100644 generate-version-catalog.groovy delete mode 100644 jdks.log delete mode 100644 list-plain-deps.groovy delete mode 100644 versions.log diff --git a/convert-deps.groovy b/convert-deps.groovy deleted file mode 100644 index 23b2a8dafb496..0000000000000 --- a/convert-deps.groovy +++ /dev/null @@ -1,24 +0,0 @@ -import groovy.io.FileType -import java.nio.file.* -import java.nio.charset.StandardCharsets -import java.util.regex.Pattern - -// Define the base directory to start the search -def baseDir = new File('/Users/rene/dev/elastic/elasticsearch/plugins') - def pattern = Pattern.compile(/(\w+)\s+['"](\w[^'"]+):([^'"]+):([^'"]+)['"]/) - -// Define the pattern to match dependencies -def dependencyPattern = ~/(\w+)\s+['"](\w[^'"]+):([^'"]+):([^'"]+)['"]/ - -baseDir.eachFileRecurse(FileType.FILES) { file -> - if (file.name.endsWith('.gradle')) { - def content = file.text - def newContent = content.replaceAll(dependencyPattern) { match, config, group, name, version -> - def libName = "${name.replaceAll('-', '.')}".toLowerCase() - "$config libs.${libName}" - } - file.text = newContent - } -} - -println "Dependency patterns replaced successfully." 
\ No newline at end of file diff --git a/generate-version-catalog.groovy b/generate-version-catalog.groovy deleted file mode 100644 index fe4890e725599..0000000000000 --- a/generate-version-catalog.groovy +++ /dev/null @@ -1,319 +0,0 @@ -import java.nio.file.* -import java.nio.charset.StandardCharsets -import java.util.regex.Pattern - -REPO_ROOT = "/Users/rene/dev/elastic/elasticsearch/plugins" -VERSION_PROPS = REPO_ROOT + "/../build-tools-internal/version.properties" - -def parseGradleFiles(Path directory) { - def pattern = Pattern.compile(/(\w+)\s+['"](\w[^'"]+):([^'"]+):([^'"]+)['"]/) - def dependencies = [] - - Files.walk(directory).each { path -> - if (Files.isRegularFile(path) && path.toString().endsWith('.gradle') && path.toString().contains("plugins/examples") == false){ - def lines = Files.readAllLines(path, StandardCharsets.UTF_8) - lines.each { line -> - def matcher = pattern.matcher(line) - if (matcher.find()) { - def configuration = matcher.group(1) - def group = matcher.group(2) - def name = matcher.group(3) - def version = matcher.group(4) - dependencies << [file: path.toString(), configuration: configuration, group: group, name: name, version: version] - } - } - } - } - return dependencies -} - -String convertToVersionCatalogEntry(def dependencies) { - Set versions = new TreeSet<>() - Set entries = new TreeSet<>() - -} - -def resolveVersion(Properties props, String versionString) { - println "Resolving version: ${versionString}" - if(versionString.startsWith("\${versions.")) { - def versionId = versionString.substring(versionString.indexOf('.') + 1, versionString.indexOf('}')) - if(props.containsKey(versionId)) { - return props.getProperty(versionId) - } else { - println "unknown version ${versionString} found in build.gradle file. Please add it to the version.properties file." - return versionId - } - } - - return versionString -} - - -Properties loadVersionProperties() { - def properties = new Properties() - def file = new File(VERSION_PROPS) - if (!file.exists()) { - println "The properties file '${VERSION_PROPS}' does not exist." - return null - } - file.withInputStream { stream -> - properties.load(stream) - } - properties.each { key, value -> - println "Loaded version property: ${key} = ${value}" - } - return properties -} - -def convertToCamelCase(String input) { - def parts = input.split('-') - def camelCaseString = parts[0] - parts.tail().each { part -> - // for now skip camel casing - //camelCaseString += part.capitalize() - camelCaseString += part - } - return camelCaseString -} - -String calculateVersionRef(String libraryName, Map versionCatalog, Properties properties, String version) { - // String versionRefName = convertToCamelCase(libraryName) - String versionRefName = libraryName - - if(versionCatalog.containsKey(versionRefName)) { - def existingMajor = versionCatalog[libraryName].split("\\.")[0] as int - def newMajor = version.split("\\.")[0] as int - println "existingMajor: ${existingMajor}, newMajor: ${newMajor}" - - if(newMajor > existingMajor) { - return versionRefName + newMajor - } - } - return versionRefName -} - -def checkOptimizations(Map versionCatalog, Properties versionProperties) { - def simplifications = [:] - versionCatalog.each { givenKey, givenVersion -> - def simpleKey = givenKey.contains("-") ? 
givenKey.split('-')[0] : givenKey - def candidates = versionCatalog.findAll {k, v -> givenKey != k && k.startsWith("${simpleKey}-")} - if(candidates.size() == 0 && versionProperties[simpleKey] != null) { - assert versionProperties[simpleKey] == givenVersion - simplifications[givenKey] = simpleKey - } else { - candidates.each {candidateKey , candidateVersion -> - if(candidateVersion == givenVersion) { - simplifications[candidateKey] = simpleKey - } - } - } - - if(simplifications[givenKey] == null){ - def converted = convertToCamelCase(givenKey) - - if(givenKey != converted) { - simplifications[givenKey] = converted - } - } - } - - return simplifications -} - - -def parseValue(value) { - if (value.startsWith('"') && value.endsWith('"')) { - return value[1..-2] // String value - } else if (value ==~ /\d+/) { - return value.toInteger() // Integer value - } else if (value ==~ /\d+\.\d+/) { - return value.toDouble() // Double value - } else if (value == 'true' || value == 'false') { - return value.toBoolean() // Boolean value - } else if (value.startsWith('[') && value.endsWith(']')) { - return value[1..-2].split(',').collect { parseValue(it.trim()) } // Array value - } else { - return value // Default to string if not matched - } -} - -def parseTomlFile(filePath) { - def tomlMap = [:] - def currentSection = null - def file = new File(filePath) - - file.eachLine { line -> - line = line.trim() - - if (line.startsWith('#') || line.isEmpty()) { - // Skip comments and empty lines - return - } - - if (line.startsWith('[') && line.endsWith(']')) { - // New section - currentSection = line[1..-2] - tomlMap[currentSection] = [:] - } else if (line.contains('=')) { - // Key-value pair - def (key, value) = line.split('=', 2).collect { it.trim() } - value = parseValue(value) - if (currentSection) { - tomlMap[currentSection][key] = value - } else { - tomlMap[key] = value - } - } - } - - return tomlMap -} - -def main() { - // def directoryPath = System.console().readLine('Enter the directory path to search for *.gradle files: ').trim() - // def directory = Paths.get("directoryPath") - def directory = Paths.get(REPO_ROOT) - - if (!Files.exists(directory) || !Files.isDirectory(directory)) { - println "The directory '${directoryPath}' does not exist or is not a directory." 
- return - } - - def dependencies = parseGradleFiles(directory) - - def librariesCatalog = [:] - def versionsCatalog = [:] - - Properties versionProperties = loadVersionProperties() - println "Version Properties: ${versionProperties.contains('junit')}" - if (dependencies) { - def depsByFile = dependencies.groupBy {it.file} - depsByFile.each { file, deps -> - println "File: ${file}" - deps.each { dep -> - def effectiveVersion = resolveVersion(versionProperties, dep.version) - def versionRefName = calculateVersionRef(dep.name, versionsCatalog, versionProperties, effectiveVersion) - versionsCatalog.put(versionRefName, effectiveVersion) - depLibraryEntry = [group: dep.group, name: dep.name, version:versionRefName] - println "\"${dep.group}:${dep.name}:${dep.version}\" -> \"${depLibraryEntry}\"" - if(librariesCatalog.containsKey(versionRefName)) { - assert librariesCatalog[versionRefName] == depLibraryEntry - } else { - librariesCatalog.put(versionRefName, depLibraryEntry) - } - } - - println "" - } - - println "libraries Catalog versions" - - librariesCatalog.each { key, value -> - println "${key} = ${value}" - } - - println "Version Catalog libraries" - versionsCatalog.each { key, value -> - println "${key} = ${value}" - } - println "Found ${dependencies.size()} dependencies in ${depsByFile.size()} files." - - } else { - println "No dependencies found." - } - - def versionOptimizations = checkOptimizations(versionsCatalog, versionProperties) - - versionOptimizations.each { given, simplified -> - println "$given -> $simplified" - println "${versionsCatalog[simplified]}" - if(versionsCatalog[simplified] == null) { - versionsCatalog[simplified] = versionsCatalog[given] - } - versionsCatalog.remove(given) - } - - librariesCatalog.each { key, value -> - def simplified = versionOptimizations[key] - if(simplified != null) { - librariesCatalog[key].version = simplified - } - } - - println "\n\nversions: " - versionsCatalog.sort().each { key, value -> - println "${key} = \"${value}\"" - } - - librariesCatalog.sort() - println "\n\nlibraries: " - librariesCatalog.sort().each { k, v -> - println "${k} = { group = \"${v['group']}\", name = \"${v['name']}\", version.ref = \"${v['version']}\" } " - } - - // Example usage - def tomlFilePath = '/Users/rene/dev/elastic/elasticsearch/gradle/versions.toml' - def parsedToml = parseTomlFile(tomlFilePath) - - // Access parsed data - existingVersions = parsedToml['versions'] - -// println "\n\nExisting versions:" -// existingVersions.forEach { key, value -> -// println "${key} = ${value}" -// } - -// existingLibs = parsedToml['libraries'] - -// existingLibs.forEach { key, value -> -// println "${key} = ${value}" -// } - -def finalVersions = [:] -def finalLibraries = [:] - -existingVersions.each { key, value -> - finalVersions[key] = value - if(versionsCatalog.containsKey(key)) { - assert value == versionsCatalog[key] - versionsCatalog.remove(key) - } -} -finalVersions.putAll(versionsCatalog) - - -println "\n\n[versions]" -finalVersions.sort().each { key, value -> - println "${key} = \"${value}\"" -} - -def existingLibs = parsedToml['libraries'] -existingLibs.each { key, value -> - finalLibraries[key] = value - if(librariesCatalog[key] != null) { - def newValue = librariesCatalog[key] - assert value == "{ group = \"${newValue['group']}\", name = \"${newValue['name']}\", version.ref = \"${newValue['version']}\" }" - librariesCatalog.remove(key) - } -} -finalLibraries.putAll(librariesCatalog) - -println "\n\n[libraries]" -finalLibraries.sort().each { key, value -> - 
if(value instanceof Map) { - println "${key} = { group = \"${value['group']}\", name = \"${value['name']}\", version.ref = \"${value['version']}\" }" - } else if (value.startsWith("{")) { - println "${key} = $value" - } else { - println "${key} = \"$value\"" - } -} - -// println "Title: ${parsedToml['title']}" -// println "Owner Name: ${parsedToml['versions']['name']}" -// println "Database Server: ${parsedToml['database']['server']}" -// println "Database Ports: ${parsedToml['database']['ports']}" - -} - -main() \ No newline at end of file diff --git a/jdks.log b/jdks.log deleted file mode 100644 index 1354bcbe6c1b4..0000000000000 --- a/jdks.log +++ /dev/null @@ -1,3146 +0,0 @@ -{ - "service": "elastic_jvm_service", - "valid_locations": [ - "/manifest.json", - "/jdks", - "/jdk/adoptiumjdk-11.0.12+7-darwin", - "/jdk/adoptiumjdk-11.0.12+7-linux", - "/jdk/adoptiumjdk-11.0.12+7-linux-aarch64", - "/jdk/adoptiumjdk-11.0.12+7-windows", - "/jdk/adoptiumjdk-11.0.12+7-windows-x86_32", - "/jdk/adoptiumjdk-11.0.13+8-darwin", - "/jdk/adoptiumjdk-11.0.13+8-linux", - "/jdk/adoptiumjdk-11.0.13+8-linux-aarch64", - "/jdk/adoptiumjdk-11.0.13+8-windows", - "/jdk/adoptiumjdk-11.0.13+8-windows-x86_32", - "/jdk/adoptiumjdk-11.0.14+9-darwin", - "/jdk/adoptiumjdk-11.0.14+9-linux", - "/jdk/adoptiumjdk-11.0.14+9-linux-aarch64", - "/jdk/adoptiumjdk-11.0.14+9-windows", - "/jdk/adoptiumjdk-11.0.14+9-windows-x86_32", - "/jdk/adoptiumjdk-11.0.14.1+1-darwin", - "/jdk/adoptiumjdk-11.0.14.1+1-linux", - "/jdk/adoptiumjdk-11.0.14.1+1-linux-aarch64", - "/jdk/adoptiumjdk-11.0.14.1+1-windows", - "/jdk/adoptiumjdk-11.0.14.1+1-windows-x86_32", - "/jdk/adoptiumjdk-11.0.15+10-darwin", - "/jdk/adoptiumjdk-11.0.15+10-darwin-aarch64", - "/jdk/adoptiumjdk-11.0.15+10-linux", - "/jdk/adoptiumjdk-11.0.15+10-linux-aarch64", - "/jdk/adoptiumjdk-11.0.15+10-windows", - "/jdk/adoptiumjdk-11.0.15+10-windows-x86_32", - "/jdk/adoptiumjdk-11.0.16+8-darwin", - "/jdk/adoptiumjdk-11.0.16+8-darwin-aarch64", - "/jdk/adoptiumjdk-11.0.16+8-linux", - "/jdk/adoptiumjdk-11.0.16+8-linux-aarch64", - "/jdk/adoptiumjdk-11.0.16+8-windows", - "/jdk/adoptiumjdk-11.0.16+8-windows-x86_32", - "/jdk/adoptiumjdk-11.0.16.1+1-darwin", - "/jdk/adoptiumjdk-11.0.16.1+1-darwin-aarch64", - "/jdk/adoptiumjdk-11.0.16.1+1-linux", - "/jdk/adoptiumjdk-11.0.16.1+1-linux-aarch64", - "/jdk/adoptiumjdk-11.0.16.1+1-windows", - "/jdk/adoptiumjdk-11.0.16.1+1-windows-x86_32", - "/jdk/adoptiumjdk-11.0.17+8-darwin", - "/jdk/adoptiumjdk-11.0.17+8-darwin-aarch64", - "/jdk/adoptiumjdk-11.0.17+8-linux", - "/jdk/adoptiumjdk-11.0.17+8-linux-aarch64", - "/jdk/adoptiumjdk-11.0.17+8-windows", - "/jdk/adoptiumjdk-11.0.17+8-windows-x86_32", - "/jdk/adoptiumjdk-11.0.18+10-darwin", - "/jdk/adoptiumjdk-11.0.18+10-darwin-aarch64", - "/jdk/adoptiumjdk-11.0.18+10-linux", - "/jdk/adoptiumjdk-11.0.18+10-linux-aarch64", - "/jdk/adoptiumjdk-11.0.18+10-windows", - "/jdk/adoptiumjdk-11.0.18+10-windows-x86_32", - "/jdk/adoptiumjdk-11.0.19+7-darwin", - "/jdk/adoptiumjdk-11.0.19+7-darwin-aarch64", - "/jdk/adoptiumjdk-11.0.19+7-linux", - "/jdk/adoptiumjdk-11.0.19+7-linux-aarch64", - "/jdk/adoptiumjdk-11.0.19+7-windows", - "/jdk/adoptiumjdk-11.0.19+7-windows-x86_32", - "/jdk/adoptiumjdk-11.0.20+8-darwin", - "/jdk/adoptiumjdk-11.0.20+8-darwin-aarch64", - "/jdk/adoptiumjdk-11.0.20+8-linux", - "/jdk/adoptiumjdk-11.0.20+8-linux-aarch64", - "/jdk/adoptiumjdk-11.0.20+8-windows", - "/jdk/adoptiumjdk-11.0.20+8-windows-x86_32", - "/jdk/adoptiumjdk-11.0.20.1+1-darwin", - "/jdk/adoptiumjdk-11.0.20.1+1-darwin-aarch64", - 
"/jdk/adoptiumjdk-11.0.20.1+1-linux", - "/jdk/adoptiumjdk-11.0.20.1+1-linux-aarch64", - "/jdk/adoptiumjdk-11.0.20.1+1-windows", - "/jdk/adoptiumjdk-11.0.20.1+1-windows-x86_32", - "/jdk/adoptiumjdk-11.0.21+9-darwin", - "/jdk/adoptiumjdk-11.0.21+9-darwin-aarch64", - "/jdk/adoptiumjdk-11.0.21+9-linux", - "/jdk/adoptiumjdk-11.0.21+9-linux-aarch64", - "/jdk/adoptiumjdk-11.0.21+9-windows", - "/jdk/adoptiumjdk-11.0.21+9-windows-x86_32", - "/jdk/adoptiumjdk-11.0.22+7-darwin", - "/jdk/adoptiumjdk-11.0.22+7-darwin-aarch64", - "/jdk/adoptiumjdk-11.0.22+7-linux", - "/jdk/adoptiumjdk-11.0.22+7-linux-aarch64", - "/jdk/adoptiumjdk-11.0.22+7-windows", - "/jdk/adoptiumjdk-11.0.22+7-windows-x86_32", - "/jdk/adoptiumjdk-11.0.23+9-darwin", - "/jdk/adoptiumjdk-11.0.23+9-darwin-aarch64", - "/jdk/adoptiumjdk-11.0.23+9-linux", - "/jdk/adoptiumjdk-11.0.23+9-linux-aarch64", - "/jdk/adoptiumjdk-11.0.23+9-windows", - "/jdk/adoptiumjdk-11.0.23+9-windows-x86_32", - "/jdk/adoptiumjdk-16.0.2+7-darwin", - "/jdk/adoptiumjdk-16.0.2+7-linux", - "/jdk/adoptiumjdk-16.0.2+7-linux-aarch64", - "/jdk/adoptiumjdk-16.0.2+7-windows", - "/jdk/adoptiumjdk-16.0.2+7-windows-x86_32", - "/jdk/adoptiumjdk-17+35-darwin", - "/jdk/adoptiumjdk-17+35-darwin-aarch64", - "/jdk/adoptiumjdk-17+35-linux", - "/jdk/adoptiumjdk-17+35-linux-aarch64", - "/jdk/adoptiumjdk-17+35-windows", - "/jdk/adoptiumjdk-17+35-windows-x86_32", - "/jdk/adoptiumjdk-17.0.1+12-darwin", - "/jdk/adoptiumjdk-17.0.1+12-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.1+12-linux", - "/jdk/adoptiumjdk-17.0.1+12-linux-aarch64", - "/jdk/adoptiumjdk-17.0.1+12-windows", - "/jdk/adoptiumjdk-17.0.1+12-windows-x86_32", - "/jdk/adoptiumjdk-17.0.10+7-darwin", - "/jdk/adoptiumjdk-17.0.10+7-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.10+7-linux", - "/jdk/adoptiumjdk-17.0.10+7-linux-aarch64", - "/jdk/adoptiumjdk-17.0.10+7-windows", - "/jdk/adoptiumjdk-17.0.10+7-windows-x86_32", - "/jdk/adoptiumjdk-17.0.11+9-darwin", - "/jdk/adoptiumjdk-17.0.11+9-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.11+9-linux", - "/jdk/adoptiumjdk-17.0.11+9-linux-aarch64", - "/jdk/adoptiumjdk-17.0.11+9-windows", - "/jdk/adoptiumjdk-17.0.11+9-windows-x86_32", - "/jdk/adoptiumjdk-17.0.2+8-darwin", - "/jdk/adoptiumjdk-17.0.2+8-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.2+8-linux", - "/jdk/adoptiumjdk-17.0.2+8-linux-aarch64", - "/jdk/adoptiumjdk-17.0.2+8-windows", - "/jdk/adoptiumjdk-17.0.2+8-windows-x86_32", - "/jdk/adoptiumjdk-17.0.3+7-darwin", - "/jdk/adoptiumjdk-17.0.3+7-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.3+7-linux", - "/jdk/adoptiumjdk-17.0.3+7-linux-aarch64", - "/jdk/adoptiumjdk-17.0.3+7-windows", - "/jdk/adoptiumjdk-17.0.3+7-windows-x86_32", - "/jdk/adoptiumjdk-17.0.4+8-darwin", - "/jdk/adoptiumjdk-17.0.4+8-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.4+8-linux", - "/jdk/adoptiumjdk-17.0.4+8-linux-aarch64", - "/jdk/adoptiumjdk-17.0.4+8-windows", - "/jdk/adoptiumjdk-17.0.4+8-windows-x86_32", - "/jdk/adoptiumjdk-17.0.4.1+1-darwin", - "/jdk/adoptiumjdk-17.0.4.1+1-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.4.1+1-linux", - "/jdk/adoptiumjdk-17.0.4.1+1-linux-aarch64", - "/jdk/adoptiumjdk-17.0.4.1+1-windows", - "/jdk/adoptiumjdk-17.0.4.1+1-windows-x86_32", - "/jdk/adoptiumjdk-17.0.5+8-darwin", - "/jdk/adoptiumjdk-17.0.5+8-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.5+8-linux", - "/jdk/adoptiumjdk-17.0.5+8-linux-aarch64", - "/jdk/adoptiumjdk-17.0.5+8-windows", - "/jdk/adoptiumjdk-17.0.5+8-windows-x86_32", - "/jdk/adoptiumjdk-17.0.6+10-darwin", - "/jdk/adoptiumjdk-17.0.6+10-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.6+10-linux", - 
"/jdk/adoptiumjdk-17.0.6+10-linux-aarch64", - "/jdk/adoptiumjdk-17.0.6+10-windows", - "/jdk/adoptiumjdk-17.0.6+10-windows-x86_32", - "/jdk/adoptiumjdk-17.0.7+7-darwin", - "/jdk/adoptiumjdk-17.0.7+7-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.7+7-linux", - "/jdk/adoptiumjdk-17.0.7+7-linux-aarch64", - "/jdk/adoptiumjdk-17.0.7+7-windows", - "/jdk/adoptiumjdk-17.0.7+7-windows-x86_32", - "/jdk/adoptiumjdk-17.0.8+7-darwin", - "/jdk/adoptiumjdk-17.0.8+7-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.8+7-linux", - "/jdk/adoptiumjdk-17.0.8+7-linux-aarch64", - "/jdk/adoptiumjdk-17.0.8+7-windows", - "/jdk/adoptiumjdk-17.0.8+7-windows-x86_32", - "/jdk/adoptiumjdk-17.0.8.1+1-darwin", - "/jdk/adoptiumjdk-17.0.8.1+1-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.8.1+1-linux", - "/jdk/adoptiumjdk-17.0.8.1+1-linux-aarch64", - "/jdk/adoptiumjdk-17.0.8.1+1-windows", - "/jdk/adoptiumjdk-17.0.8.1+1-windows-x86_32", - "/jdk/adoptiumjdk-17.0.9+9-darwin", - "/jdk/adoptiumjdk-17.0.9+9-darwin-aarch64", - "/jdk/adoptiumjdk-17.0.9+9-linux", - "/jdk/adoptiumjdk-17.0.9+9-linux-aarch64", - "/jdk/adoptiumjdk-17.0.9+9-windows", - "/jdk/adoptiumjdk-17.0.9+9-windows-x86_32", - "/jdk/adoptiumjdk-21.0.1+12-darwin", - "/jdk/adoptiumjdk-21.0.1+12-darwin-aarch64", - "/jdk/adoptiumjdk-21.0.1+12-linux", - "/jdk/adoptiumjdk-21.0.1+12-linux-aarch64", - "/jdk/adoptiumjdk-21.0.1+12-windows", - "/jdk/adoptiumjdk-21.0.2+13-darwin", - "/jdk/adoptiumjdk-21.0.2+13-darwin-aarch64", - "/jdk/adoptiumjdk-21.0.2+13-linux", - "/jdk/adoptiumjdk-21.0.2+13-linux-aarch64", - "/jdk/adoptiumjdk-21.0.2+13-windows", - "/jdk/adoptiumjdk-21.0.3+9-darwin", - "/jdk/adoptiumjdk-21.0.3+9-darwin-aarch64", - "/jdk/adoptiumjdk-21.0.3+9-linux", - "/jdk/adoptiumjdk-21.0.3+9-linux-aarch64", - "/jdk/adoptiumjdk-21.0.3+9-windows", - "/jdk/adoptiumjdk-8u302-darwin", - "/jdk/adoptiumjdk-8u302-linux", - "/jdk/adoptiumjdk-8u302-linux-aarch64", - "/jdk/adoptiumjdk-8u302-windows", - "/jdk/adoptiumjdk-8u302-windows-x86_32", - "/jdk/adoptiumjdk-8u312-darwin", - "/jdk/adoptiumjdk-8u312-linux", - "/jdk/adoptiumjdk-8u312-linux-aarch64", - "/jdk/adoptiumjdk-8u312-windows", - "/jdk/adoptiumjdk-8u312-windows-x86_32", - "/jdk/adoptiumjdk-8u322-darwin", - "/jdk/adoptiumjdk-8u322-linux", - "/jdk/adoptiumjdk-8u322-linux-aarch64", - "/jdk/adoptiumjdk-8u322-windows", - "/jdk/adoptiumjdk-8u322-windows-x86_32", - "/jdk/adoptiumjdk-8u332-darwin", - "/jdk/adoptiumjdk-8u332-linux", - "/jdk/adoptiumjdk-8u332-linux-aarch64", - "/jdk/adoptiumjdk-8u332-windows", - "/jdk/adoptiumjdk-8u332-windows-x86_32", - "/jdk/adoptiumjdk-8u342-linux", - "/jdk/adoptiumjdk-8u342-linux-aarch64", - "/jdk/adoptiumjdk-8u342-windows", - "/jdk/adoptiumjdk-8u342-windows-x86_32", - "/jdk/adoptiumjdk-8u345-darwin", - "/jdk/adoptiumjdk-8u345-linux", - "/jdk/adoptiumjdk-8u345-linux-aarch64", - "/jdk/adoptiumjdk-8u345-windows", - "/jdk/adoptiumjdk-8u345-windows-x86_32", - "/jdk/adoptiumjdk-8u352-darwin", - "/jdk/adoptiumjdk-8u352-linux", - "/jdk/adoptiumjdk-8u352-linux-aarch64", - "/jdk/adoptiumjdk-8u352-windows", - "/jdk/adoptiumjdk-8u352-windows-x86_32", - "/jdk/adoptiumjdk-8u362-darwin", - "/jdk/adoptiumjdk-8u362-linux", - "/jdk/adoptiumjdk-8u362-linux-aarch64", - "/jdk/adoptiumjdk-8u362-windows", - "/jdk/adoptiumjdk-8u362-windows-x86_32", - "/jdk/adoptiumjdk-8u372-darwin", - "/jdk/adoptiumjdk-8u372-linux", - "/jdk/adoptiumjdk-8u372-linux-aarch64", - "/jdk/adoptiumjdk-8u372-windows", - "/jdk/adoptiumjdk-8u372-windows-x86_32", - "/jdk/adoptiumjdk-8u382-darwin", - "/jdk/adoptiumjdk-8u382-linux", - 
"/jdk/adoptiumjdk-8u382-linux-aarch64", - "/jdk/adoptiumjdk-8u382-windows", - "/jdk/adoptiumjdk-8u382-windows-x86_32", - "/jdk/adoptiumjdk-8u392-darwin", - "/jdk/adoptiumjdk-8u392-linux", - "/jdk/adoptiumjdk-8u392-linux-aarch64", - "/jdk/adoptiumjdk-8u392-windows", - "/jdk/adoptiumjdk-8u392-windows-x86_32", - "/jdk/adoptiumjdk-8u402-darwin", - "/jdk/adoptiumjdk-8u402-linux", - "/jdk/adoptiumjdk-8u402-linux-aarch64", - "/jdk/adoptiumjdk-8u402-windows", - "/jdk/adoptiumjdk-8u402-windows-x86_32", - "/jdk/adoptiumjdk-8u412-darwin", - "/jdk/adoptiumjdk-8u412-linux", - "/jdk/adoptiumjdk-8u412-linux-aarch64", - "/jdk/adoptiumjdk-8u412-windows", - "/jdk/adoptiumjdk-8u412-windows-x86_32", - "/jdk/adoptopenjdk-11+28-darwin", - "/jdk/adoptopenjdk-11+28-linux", - "/jdk/adoptopenjdk-11+28-windows", - "/jdk/adoptopenjdk-11.0.1+13-darwin", - "/jdk/adoptopenjdk-11.0.1+13-linux", - "/jdk/adoptopenjdk-11.0.1+13-linux-aarch64", - "/jdk/adoptopenjdk-11.0.1+13-windows", - "/jdk/adoptopenjdk-11.0.10+9-darwin", - "/jdk/adoptopenjdk-11.0.10+9-linux", - "/jdk/adoptopenjdk-11.0.10+9-linux-aarch64", - "/jdk/adoptopenjdk-11.0.10+9-windows", - "/jdk/adoptopenjdk-11.0.10+9-windows-x86_32", - "/jdk/adoptopenjdk-11.0.11+9-darwin", - "/jdk/adoptopenjdk-11.0.11+9-linux", - "/jdk/adoptopenjdk-11.0.11+9-linux-aarch64", - "/jdk/adoptopenjdk-11.0.11+9-windows", - "/jdk/adoptopenjdk-11.0.11+9-windows-x86_32", - "/jdk/adoptopenjdk-11.0.2+7-darwin", - "/jdk/adoptopenjdk-11.0.2+7-linux", - "/jdk/adoptopenjdk-11.0.2+7-linux-aarch64", - "/jdk/adoptopenjdk-11.0.2+7-windows", - "/jdk/adoptopenjdk-11.0.2+9-darwin", - "/jdk/adoptopenjdk-11.0.2+9-linux", - "/jdk/adoptopenjdk-11.0.2+9-linux-aarch64", - "/jdk/adoptopenjdk-11.0.2+9-windows", - "/jdk/adoptopenjdk-11.0.3+7-darwin", - "/jdk/adoptopenjdk-11.0.3+7-linux", - "/jdk/adoptopenjdk-11.0.3+7-linux-aarch64", - "/jdk/adoptopenjdk-11.0.3+7-windows", - "/jdk/adoptopenjdk-11.0.4+11-darwin", - "/jdk/adoptopenjdk-11.0.4+11-linux", - "/jdk/adoptopenjdk-11.0.4+11-linux-aarch64", - "/jdk/adoptopenjdk-11.0.4+11-windows", - "/jdk/adoptopenjdk-11.0.5+10-darwin", - "/jdk/adoptopenjdk-11.0.5+10-linux", - "/jdk/adoptopenjdk-11.0.5+10-windows", - "/jdk/adoptopenjdk-11.0.6+10-darwin", - "/jdk/adoptopenjdk-11.0.6+10-linux", - "/jdk/adoptopenjdk-11.0.6+10-linux-aarch64", - "/jdk/adoptopenjdk-11.0.6+10-windows", - "/jdk/adoptopenjdk-11.0.7+10-darwin", - "/jdk/adoptopenjdk-11.0.7+10-linux", - "/jdk/adoptopenjdk-11.0.7+10-linux-aarch64", - "/jdk/adoptopenjdk-11.0.7+10-windows", - "/jdk/adoptopenjdk-11.0.8+10-darwin", - "/jdk/adoptopenjdk-11.0.8+10-linux", - "/jdk/adoptopenjdk-11.0.8+10-linux-aarch64", - "/jdk/adoptopenjdk-11.0.8+10-windows", - "/jdk/adoptopenjdk-11.0.8+10-windows-x86_32", - "/jdk/adoptopenjdk-11.0.9+11-darwin", - "/jdk/adoptopenjdk-11.0.9+11-linux", - "/jdk/adoptopenjdk-11.0.9+11-linux-aarch64", - "/jdk/adoptopenjdk-11.0.9+11-windows", - "/jdk/adoptopenjdk-11.0.9+11-windows-x86_32", - "/jdk/adoptopenjdk-11.0.9.1+1-darwin", - "/jdk/adoptopenjdk-11.0.9.1+1-linux", - "/jdk/adoptopenjdk-11.0.9.1+1-linux-aarch64", - "/jdk/adoptopenjdk-11.0.9.1+1-windows", - "/jdk/adoptopenjdk-11.0.9.1+1-windows-x86_32", - "/jdk/adoptopenjdk-12+33-darwin", - "/jdk/adoptopenjdk-12+33-linux", - "/jdk/adoptopenjdk-12+33-windows", - "/jdk/adoptopenjdk-12.0.1+12-darwin", - "/jdk/adoptopenjdk-12.0.1+12-linux", - "/jdk/adoptopenjdk-12.0.1+12-linux-aarch64", - "/jdk/adoptopenjdk-12.0.1+12-windows", - "/jdk/adoptopenjdk-12.0.2+10-darwin", - "/jdk/adoptopenjdk-12.0.2+10-linux", - "/jdk/adoptopenjdk-12.0.2+10-linux-aarch64", 
- "/jdk/adoptopenjdk-12.0.2+10-windows", - "/jdk/adoptopenjdk-12.0.2+10-windows-x86_32", - "/jdk/adoptopenjdk-13.0.1+9-darwin", - "/jdk/adoptopenjdk-13.0.1+9-linux", - "/jdk/adoptopenjdk-13.0.1+9-windows", - "/jdk/adoptopenjdk-13.0.2+8-darwin", - "/jdk/adoptopenjdk-13.0.2+8-linux", - "/jdk/adoptopenjdk-13.0.2+8-linux-aarch64", - "/jdk/adoptopenjdk-13.0.2+8-windows", - "/jdk/adoptopenjdk-13.0.2+8-windows-x86_32", - "/jdk/adoptopenjdk-14.0.1+7-darwin", - "/jdk/adoptopenjdk-14.0.1+7-linux", - "/jdk/adoptopenjdk-14.0.1+7-linux-aarch64", - "/jdk/adoptopenjdk-14.0.1+7-windows", - "/jdk/adoptopenjdk-14.0.2+12-darwin", - "/jdk/adoptopenjdk-14.0.2+12-linux", - "/jdk/adoptopenjdk-14.0.2+12-linux-aarch64", - "/jdk/adoptopenjdk-14.0.2+12-windows", - "/jdk/adoptopenjdk-14.0.2+12-windows-x86_32", - "/jdk/adoptopenjdk-15+36-darwin", - "/jdk/adoptopenjdk-15+36-linux", - "/jdk/adoptopenjdk-15+36-linux-aarch64", - "/jdk/adoptopenjdk-15+36-windows", - "/jdk/adoptopenjdk-15+36-windows-x86_32", - "/jdk/adoptopenjdk-15.0.1+9-darwin", - "/jdk/adoptopenjdk-15.0.1+9-linux", - "/jdk/adoptopenjdk-15.0.1+9-linux-aarch64", - "/jdk/adoptopenjdk-15.0.1+9-windows", - "/jdk/adoptopenjdk-15.0.1+9-windows-x86_32", - "/jdk/adoptopenjdk-15.0.2+7-darwin", - "/jdk/adoptopenjdk-15.0.2+7-linux", - "/jdk/adoptopenjdk-15.0.2+7-linux-aarch64", - "/jdk/adoptopenjdk-15.0.2+7-windows", - "/jdk/adoptopenjdk-15.0.2+7-windows-x86_32", - "/jdk/adoptopenjdk-16+36-darwin", - "/jdk/adoptopenjdk-16+36-linux", - "/jdk/adoptopenjdk-16+36-linux-aarch64", - "/jdk/adoptopenjdk-16+36-windows", - "/jdk/adoptopenjdk-16+36-windows-x86_32", - "/jdk/adoptopenjdk-16.0.1+9-darwin", - "/jdk/adoptopenjdk-16.0.1+9-linux", - "/jdk/adoptopenjdk-16.0.1+9-linux-aarch64", - "/jdk/adoptopenjdk-16.0.1+9-windows", - "/jdk/adoptopenjdk-16.0.1+9-windows-x86_32", - "/jdk/adoptopenjdk-8u181-darwin", - "/jdk/adoptopenjdk-8u181-linux", - "/jdk/adoptopenjdk-8u181-windows", - "/jdk/adoptopenjdk-8u191-linux-aarch64", - "/jdk/adoptopenjdk-8u192-darwin", - "/jdk/adoptopenjdk-8u192-linux", - "/jdk/adoptopenjdk-8u192-windows", - "/jdk/adoptopenjdk-8u202-darwin", - "/jdk/adoptopenjdk-8u202-linux", - "/jdk/adoptopenjdk-8u202-windows", - "/jdk/adoptopenjdk-8u212-darwin", - "/jdk/adoptopenjdk-8u212-linux", - "/jdk/adoptopenjdk-8u212-windows", - "/jdk/adoptopenjdk-8u222-darwin", - "/jdk/adoptopenjdk-8u222-linux", - "/jdk/adoptopenjdk-8u222-linux-aarch64", - "/jdk/adoptopenjdk-8u222-windows", - "/jdk/adoptopenjdk-8u232-darwin", - "/jdk/adoptopenjdk-8u232-linux", - "/jdk/adoptopenjdk-8u232-linux-aarch64", - "/jdk/adoptopenjdk-8u232-windows", - "/jdk/adoptopenjdk-8u242-darwin", - "/jdk/adoptopenjdk-8u242-linux", - "/jdk/adoptopenjdk-8u242-windows", - "/jdk/adoptopenjdk-8u252-darwin", - "/jdk/adoptopenjdk-8u252-linux", - "/jdk/adoptopenjdk-8u252-linux-aarch64", - "/jdk/adoptopenjdk-8u252-windows", - "/jdk/adoptopenjdk-8u262-darwin", - "/jdk/adoptopenjdk-8u262-linux", - "/jdk/adoptopenjdk-8u262-linux-aarch64", - "/jdk/adoptopenjdk-8u262-windows", - "/jdk/adoptopenjdk-8u265-darwin", - "/jdk/adoptopenjdk-8u265-linux", - "/jdk/adoptopenjdk-8u265-linux-aarch64", - "/jdk/adoptopenjdk-8u265-windows", - "/jdk/adoptopenjdk-8u265-windows-x86_32", - "/jdk/adoptopenjdk-8u272-darwin", - "/jdk/adoptopenjdk-8u272-linux", - "/jdk/adoptopenjdk-8u272-linux-aarch64", - "/jdk/adoptopenjdk-8u272-windows", - "/jdk/adoptopenjdk-8u272-windows-x86_32", - "/jdk/adoptopenjdk-8u275-darwin", - "/jdk/adoptopenjdk-8u275-linux", - "/jdk/adoptopenjdk-8u275-linux-aarch64", - "/jdk/adoptopenjdk-8u275-windows", - 
"/jdk/adoptopenjdk-8u275-windows-x86_32", - "/jdk/adoptopenjdk-8u282-darwin", - "/jdk/adoptopenjdk-8u282-linux", - "/jdk/adoptopenjdk-8u282-windows", - "/jdk/adoptopenjdk-8u282-windows-x86_32", - "/jdk/adoptopenjdk-8u292-darwin", - "/jdk/adoptopenjdk-8u292-linux", - "/jdk/adoptopenjdk-8u292-linux-aarch64", - "/jdk/adoptopenjdk-8u292-windows", - "/jdk/adoptopenjdk-8u292-windows-x86_32", - "/jdk/adoptopenjdk-openj9-11.0.12+7-linux", - "/jdk/adoptopenjdk-openj9-11.0.12+7-windows", - "/jdk/adoptopenjdk-openj9-11.0.13+8-linux", - "/jdk/adoptopenjdk-openj9-11.0.13+8-windows", - "/jdk/adoptopenjdk-openj9-11.0.14+9-linux", - "/jdk/adoptopenjdk-openj9-11.0.14+9-windows", - "/jdk/adoptopenjdk-openj9-11.0.14.1+1-linux", - "/jdk/adoptopenjdk-openj9-11.0.14.1+1-windows", - "/jdk/adoptopenjdk-openj9-11.0.15+10-linux", - "/jdk/adoptopenjdk-openj9-11.0.15+10-windows", - "/jdk/adoptopenjdk-openj9-11.0.16+8-linux", - "/jdk/adoptopenjdk-openj9-11.0.16+8-windows", - "/jdk/adoptopenjdk-openj9-11.0.16.1+1-linux", - "/jdk/adoptopenjdk-openj9-11.0.16.1+1-windows", - "/jdk/adoptopenjdk-openj9-11.0.17+8-linux", - "/jdk/adoptopenjdk-openj9-11.0.17+8-windows", - "/jdk/adoptopenjdk-openj9-11.0.18+10-linux", - "/jdk/adoptopenjdk-openj9-11.0.18+10-windows", - "/jdk/adoptopenjdk-openj9-11.0.19+7-linux", - "/jdk/adoptopenjdk-openj9-11.0.19+7-windows", - "/jdk/adoptopenjdk-openj9-11.0.20+8-linux", - "/jdk/adoptopenjdk-openj9-11.0.20+8-windows", - "/jdk/adoptopenjdk-openj9-11.0.20.1+1-linux", - "/jdk/adoptopenjdk-openj9-11.0.20.1+1-windows", - "/jdk/adoptopenjdk-openj9-11.0.21+9-linux", - "/jdk/adoptopenjdk-openj9-11.0.21+9-windows", - "/jdk/adoptopenjdk-openj9-11.0.22+7-linux", - "/jdk/adoptopenjdk-openj9-11.0.22+7-windows", - "/jdk/adoptopenjdk-openj9-11.0.23+9-linux", - "/jdk/adoptopenjdk-openj9-11.0.23+9-windows", - "/jdk/adoptopenjdk-openj9-8u302-linux", - "/jdk/adoptopenjdk-openj9-8u302-windows", - "/jdk/adoptopenjdk-openj9-8u312-linux", - "/jdk/adoptopenjdk-openj9-8u312-windows", - "/jdk/adoptopenjdk-openj9-8u322-linux", - "/jdk/adoptopenjdk-openj9-8u322-windows", - "/jdk/adoptopenjdk-openj9-8u332-linux", - "/jdk/adoptopenjdk-openj9-8u332-windows", - "/jdk/adoptopenjdk-openj9-8u345-linux", - "/jdk/adoptopenjdk-openj9-8u345-windows", - "/jdk/adoptopenjdk-openj9-8u352-linux", - "/jdk/adoptopenjdk-openj9-8u352-windows", - "/jdk/adoptopenjdk-openj9-8u362-linux", - "/jdk/adoptopenjdk-openj9-8u362-windows", - "/jdk/adoptopenjdk-openj9-8u372-linux", - "/jdk/adoptopenjdk-openj9-8u372-windows", - "/jdk/adoptopenjdk-openj9-8u382-linux", - "/jdk/adoptopenjdk-openj9-8u382-windows", - "/jdk/adoptopenjdk-openj9-8u392-linux", - "/jdk/adoptopenjdk-openj9-8u392-windows", - "/jdk/adoptopenjdk-openj9-8u402-linux", - "/jdk/adoptopenjdk-openj9-8u402-windows", - "/jdk/adoptopenjdk-openj9-8u412-linux", - "/jdk/adoptopenjdk-openj9-8u412-windows", - "/jdk/amazon-corretto-11.0.10.9.1-darwin", - "/jdk/amazon-corretto-11.0.10.9.1-linux", - "/jdk/amazon-corretto-11.0.10.9.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.10.9.1-windows", - "/jdk/amazon-corretto-11.0.11.9.1-darwin", - "/jdk/amazon-corretto-11.0.11.9.1-linux", - "/jdk/amazon-corretto-11.0.11.9.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.11.9.1-windows", - "/jdk/amazon-corretto-11.0.12.7.1-linux", - "/jdk/amazon-corretto-11.0.12.7.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.12.7.1-windows", - "/jdk/amazon-corretto-11.0.12.7.2-darwin", - "/jdk/amazon-corretto-11.0.13.8.1-darwin", - "/jdk/amazon-corretto-11.0.13.8.1-linux", - "/jdk/amazon-corretto-11.0.13.8.1-linux-aarch64", - 
"/jdk/amazon-corretto-11.0.13.8.1-windows", - "/jdk/amazon-corretto-11.0.14.10.1-darwin", - "/jdk/amazon-corretto-11.0.14.10.1-linux", - "/jdk/amazon-corretto-11.0.14.10.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.14.10.1-windows", - "/jdk/amazon-corretto-11.0.14.9.1-darwin", - "/jdk/amazon-corretto-11.0.14.9.1-linux", - "/jdk/amazon-corretto-11.0.14.9.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.14.9.1-windows", - "/jdk/amazon-corretto-11.0.15.2.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.15.9.1-darwin", - "/jdk/amazon-corretto-11.0.15.9.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.15.9.1-linux", - "/jdk/amazon-corretto-11.0.15.9.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.15.9.1-windows", - "/jdk/amazon-corretto-11.0.16.8.1-darwin", - "/jdk/amazon-corretto-11.0.16.8.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.16.8.1-linux", - "/jdk/amazon-corretto-11.0.16.8.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.16.8.1-windows", - "/jdk/amazon-corretto-11.0.16.8.3-darwin", - "/jdk/amazon-corretto-11.0.16.8.3-darwin-aarch64", - "/jdk/amazon-corretto-11.0.16.9.1-darwin", - "/jdk/amazon-corretto-11.0.16.9.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.16.9.1-linux", - "/jdk/amazon-corretto-11.0.16.9.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.16.9.1-windows", - "/jdk/amazon-corretto-11.0.17.8.1-darwin", - "/jdk/amazon-corretto-11.0.17.8.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.17.8.1-linux", - "/jdk/amazon-corretto-11.0.17.8.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.17.8.1-windows", - "/jdk/amazon-corretto-11.0.18.10.1-darwin", - "/jdk/amazon-corretto-11.0.18.10.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.18.10.1-linux", - "/jdk/amazon-corretto-11.0.18.10.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.18.10.1-windows", - "/jdk/amazon-corretto-11.0.19.7.1-darwin", - "/jdk/amazon-corretto-11.0.19.7.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.19.7.1-linux", - "/jdk/amazon-corretto-11.0.19.7.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.19.7.1-windows", - "/jdk/amazon-corretto-11.0.20.8.1-darwin", - "/jdk/amazon-corretto-11.0.20.8.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.20.8.1-linux", - "/jdk/amazon-corretto-11.0.20.8.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.20.8.1-windows", - "/jdk/amazon-corretto-11.0.20.9.1-darwin", - "/jdk/amazon-corretto-11.0.20.9.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.20.9.1-linux", - "/jdk/amazon-corretto-11.0.20.9.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.20.9.1-windows", - "/jdk/amazon-corretto-11.0.21.9.1-darwin", - "/jdk/amazon-corretto-11.0.21.9.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.21.9.1-linux", - "/jdk/amazon-corretto-11.0.21.9.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.21.9.1-windows", - "/jdk/amazon-corretto-11.0.22.7.1-darwin", - "/jdk/amazon-corretto-11.0.22.7.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.22.7.1-linux", - "/jdk/amazon-corretto-11.0.22.7.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.22.7.1-windows", - "/jdk/amazon-corretto-11.0.23.9.1-darwin", - "/jdk/amazon-corretto-11.0.23.9.1-darwin-aarch64", - "/jdk/amazon-corretto-11.0.23.9.1-linux", - "/jdk/amazon-corretto-11.0.23.9.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.23.9.1-windows", - "/jdk/amazon-corretto-11.0.3.7.1-darwin", - "/jdk/amazon-corretto-11.0.3.7.1-linux", - "/jdk/amazon-corretto-11.0.3.7.1-windows", - "/jdk/amazon-corretto-11.0.4.11.1-darwin", - "/jdk/amazon-corretto-11.0.4.11.1-linux", - "/jdk/amazon-corretto-11.0.4.11.1-windows", - "/jdk/amazon-corretto-11.0.5.10.1-linux", - 
"/jdk/amazon-corretto-11.0.5.10.1-windows", - "/jdk/amazon-corretto-11.0.5.10.2-darwin", - "/jdk/amazon-corretto-11.0.7.10.1+1-darwin", - "/jdk/amazon-corretto-11.0.7.10.1-darwin", - "/jdk/amazon-corretto-11.0.7.10.1-linux", - "/jdk/amazon-corretto-11.0.7.10.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.7.10.1-windows", - "/jdk/amazon-corretto-11.0.8.10.1-darwin", - "/jdk/amazon-corretto-11.0.8.10.1-linux", - "/jdk/amazon-corretto-11.0.8.10.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.8.10.1-windows", - "/jdk/amazon-corretto-11.0.9.11.1+1-linux", - "/jdk/amazon-corretto-11.0.9.11.1+1-linux-aarch64", - "/jdk/amazon-corretto-11.0.9.11.1-darwin", - "/jdk/amazon-corretto-11.0.9.11.2-windows", - "/jdk/amazon-corretto-11.0.9.12.1-darwin", - "/jdk/amazon-corretto-11.0.9.12.1-linux", - "/jdk/amazon-corretto-11.0.9.12.1-linux-aarch64", - "/jdk/amazon-corretto-11.0.9.12.1-windows", - "/jdk/amazon-corretto-17.0.0.35.1-darwin", - "/jdk/amazon-corretto-17.0.0.35.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.0.35.1-linux", - "/jdk/amazon-corretto-17.0.0.35.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.0.35.1-windows", - "/jdk/amazon-corretto-17.0.0.35.2-darwin", - "/jdk/amazon-corretto-17.0.0.35.2-darwin-aarch64", - "/jdk/amazon-corretto-17.0.1.12.1-darwin", - "/jdk/amazon-corretto-17.0.1.12.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.1.12.1-linux", - "/jdk/amazon-corretto-17.0.1.12.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.1.12.1-windows", - "/jdk/amazon-corretto-17.0.10.7.1-darwin", - "/jdk/amazon-corretto-17.0.10.7.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.10.7.1-linux", - "/jdk/amazon-corretto-17.0.10.7.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.10.7.1-windows", - "/jdk/amazon-corretto-17.0.10.8.1-linux", - "/jdk/amazon-corretto-17.0.10.8.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.11.9.1-darwin", - "/jdk/amazon-corretto-17.0.11.9.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.11.9.1-linux", - "/jdk/amazon-corretto-17.0.11.9.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.11.9.1-windows", - "/jdk/amazon-corretto-17.0.2.8.1-darwin", - "/jdk/amazon-corretto-17.0.2.8.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.2.8.1-linux", - "/jdk/amazon-corretto-17.0.2.8.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.2.8.1-windows", - "/jdk/amazon-corretto-17.0.3.6.1-darwin", - "/jdk/amazon-corretto-17.0.3.6.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.3.6.1-linux", - "/jdk/amazon-corretto-17.0.3.6.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.3.6.1-windows", - "/jdk/amazon-corretto-17.0.4.8.1-darwin", - "/jdk/amazon-corretto-17.0.4.8.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.4.8.1-linux", - "/jdk/amazon-corretto-17.0.4.8.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.4.8.1-windows", - "/jdk/amazon-corretto-17.0.4.9.1-darwin", - "/jdk/amazon-corretto-17.0.4.9.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.4.9.1-linux", - "/jdk/amazon-corretto-17.0.4.9.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.4.9.1-windows", - "/jdk/amazon-corretto-17.0.5.8.1-darwin", - "/jdk/amazon-corretto-17.0.5.8.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.5.8.1-linux", - "/jdk/amazon-corretto-17.0.5.8.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.5.8.1-windows", - "/jdk/amazon-corretto-17.0.6.10.1-darwin", - "/jdk/amazon-corretto-17.0.6.10.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.6.10.1-linux", - "/jdk/amazon-corretto-17.0.6.10.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.6.10.1-windows", - "/jdk/amazon-corretto-17.0.7.7.1-darwin", - "/jdk/amazon-corretto-17.0.7.7.1-darwin-aarch64", - 
"/jdk/amazon-corretto-17.0.7.7.1-linux", - "/jdk/amazon-corretto-17.0.7.7.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.7.7.1-windows", - "/jdk/amazon-corretto-17.0.8.7.1-darwin", - "/jdk/amazon-corretto-17.0.8.7.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.8.7.1-linux", - "/jdk/amazon-corretto-17.0.8.7.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.8.7.1-windows", - "/jdk/amazon-corretto-17.0.8.8.1-darwin", - "/jdk/amazon-corretto-17.0.8.8.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.8.8.1-linux", - "/jdk/amazon-corretto-17.0.8.8.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.8.8.1-windows", - "/jdk/amazon-corretto-17.0.9.8.1-darwin", - "/jdk/amazon-corretto-17.0.9.8.1-darwin-aarch64", - "/jdk/amazon-corretto-17.0.9.8.1-linux", - "/jdk/amazon-corretto-17.0.9.8.1-linux-aarch64", - "/jdk/amazon-corretto-17.0.9.8.1-windows", - "/jdk/amazon-corretto-21.0.0.34.1-darwin", - "/jdk/amazon-corretto-21.0.0.34.1-darwin-aarch64", - "/jdk/amazon-corretto-21.0.0.34.1-linux", - "/jdk/amazon-corretto-21.0.0.34.1-linux-aarch64", - "/jdk/amazon-corretto-21.0.0.34.1-windows", - "/jdk/amazon-corretto-21.0.0.35.1-darwin", - "/jdk/amazon-corretto-21.0.0.35.1-darwin-aarch64", - "/jdk/amazon-corretto-21.0.0.35.1-linux", - "/jdk/amazon-corretto-21.0.0.35.1-linux-aarch64", - "/jdk/amazon-corretto-21.0.0.35.1-windows", - "/jdk/amazon-corretto-21.0.1.12.1-darwin", - "/jdk/amazon-corretto-21.0.1.12.1-darwin-aarch64", - "/jdk/amazon-corretto-21.0.1.12.1-linux", - "/jdk/amazon-corretto-21.0.1.12.1-linux-aarch64", - "/jdk/amazon-corretto-21.0.1.12.1-windows", - "/jdk/amazon-corretto-21.0.2.13.1-darwin", - "/jdk/amazon-corretto-21.0.2.13.1-darwin-aarch64", - "/jdk/amazon-corretto-21.0.2.13.1-linux", - "/jdk/amazon-corretto-21.0.2.13.1-linux-aarch64", - "/jdk/amazon-corretto-21.0.2.13.1-windows", - "/jdk/amazon-corretto-21.0.2.14.1-linux", - "/jdk/amazon-corretto-21.0.2.14.1-linux-aarch64", - "/jdk/amazon-corretto-21.0.3.9.1-darwin", - "/jdk/amazon-corretto-21.0.3.9.1-darwin-aarch64", - "/jdk/amazon-corretto-21.0.3.9.1-linux", - "/jdk/amazon-corretto-21.0.3.9.1-linux-aarch64", - "/jdk/amazon-corretto-21.0.3.9.1-windows", - "/jdk/amazon-corretto-8.212.04.1-darwin", - "/jdk/amazon-corretto-8.212.04.1-linux", - "/jdk/amazon-corretto-8.212.04.1-windows", - "/jdk/amazon-corretto-8.212.04.2-darwin", - "/jdk/amazon-corretto-8.212.04.2-linux", - "/jdk/amazon-corretto-8.212.04.2-windows", - "/jdk/amazon-corretto-8.222.10.1-darwin", - "/jdk/amazon-corretto-8.222.10.1-linux", - "/jdk/amazon-corretto-8.222.10.3-windows", - "/jdk/amazon-corretto-8.232.09.1-linux", - "/jdk/amazon-corretto-8.232.09.1-windows", - "/jdk/amazon-corretto-8.232.09.2-darwin", - "/jdk/amazon-corretto-8.272.10.1-darwin", - "/jdk/amazon-corretto-8.272.10.1-linux", - "/jdk/amazon-corretto-8.272.10.1-linux-aarch64", - "/jdk/amazon-corretto-8.272.10.1-windows", - "/jdk/amazon-corretto-8.272.10.3-darwin", - "/jdk/amazon-corretto-8.272.10.3-linux", - "/jdk/amazon-corretto-8.272.10.3-linux-aarch64", - "/jdk/amazon-corretto-8.272.10.3-windows", - "/jdk/amazon-corretto-8.275.01.1-darwin", - "/jdk/amazon-corretto-8.275.01.1-linux", - "/jdk/amazon-corretto-8.275.01.1-linux-aarch64", - "/jdk/amazon-corretto-8.275.01.1-windows", - "/jdk/amazon-corretto-8.282.08.1-darwin", - "/jdk/amazon-corretto-8.282.08.1-linux", - "/jdk/amazon-corretto-8.282.08.1-linux-aarch64", - "/jdk/amazon-corretto-8.282.08.1-windows", - "/jdk/amazon-corretto-8.292.10.1-darwin", - "/jdk/amazon-corretto-8.292.10.1-linux", - "/jdk/amazon-corretto-8.292.10.1-linux-aarch64", - 
"/jdk/amazon-corretto-8.292.10.1-windows", - "/jdk/amazon-corretto-8.292.10.2-linux-aarch64", - "/jdk/amazon-corretto-8.302.08.1-darwin", - "/jdk/amazon-corretto-8.302.08.1-linux", - "/jdk/amazon-corretto-8.302.08.1-linux-aarch64", - "/jdk/amazon-corretto-8.302.08.1-windows", - "/jdk/amazon-corretto-8.312.07.1-darwin", - "/jdk/amazon-corretto-8.312.07.1-linux", - "/jdk/amazon-corretto-8.312.07.1-linux-aarch64", - "/jdk/amazon-corretto-8.312.07.1-windows", - "/jdk/amazon-corretto-8.322.06.1-darwin", - "/jdk/amazon-corretto-8.322.06.1-linux", - "/jdk/amazon-corretto-8.322.06.1-linux-aarch64", - "/jdk/amazon-corretto-8.322.06.1-windows", - "/jdk/amazon-corretto-8.322.06.2-linux", - "/jdk/amazon-corretto-8.322.06.2-linux-aarch64", - "/jdk/amazon-corretto-8.322.06.4-darwin-aarch64", - "/jdk/amazon-corretto-8.332.08.1-darwin", - "/jdk/amazon-corretto-8.332.08.1-darwin-aarch64", - "/jdk/amazon-corretto-8.332.08.1-linux", - "/jdk/amazon-corretto-8.332.08.1-linux-aarch64", - "/jdk/amazon-corretto-8.332.08.1-windows", - "/jdk/amazon-corretto-8.342.07.1-darwin", - "/jdk/amazon-corretto-8.342.07.1-darwin-aarch64", - "/jdk/amazon-corretto-8.342.07.1-linux", - "/jdk/amazon-corretto-8.342.07.1-linux-aarch64", - "/jdk/amazon-corretto-8.342.07.1-windows", - "/jdk/amazon-corretto-8.342.07.3-darwin", - "/jdk/amazon-corretto-8.342.07.3-darwin-aarch64", - "/jdk/amazon-corretto-8.342.07.3-linux", - "/jdk/amazon-corretto-8.342.07.3-linux-aarch64", - "/jdk/amazon-corretto-8.342.07.3-windows", - "/jdk/amazon-corretto-8.342.07.4-linux", - "/jdk/amazon-corretto-8.342.07.4-linux-aarch64", - "/jdk/amazon-corretto-8.352.08.1-darwin", - "/jdk/amazon-corretto-8.352.08.1-darwin-aarch64", - "/jdk/amazon-corretto-8.352.08.1-linux", - "/jdk/amazon-corretto-8.352.08.1-linux-aarch64", - "/jdk/amazon-corretto-8.352.08.1-windows", - "/jdk/amazon-corretto-8.362.08.1-darwin", - "/jdk/amazon-corretto-8.362.08.1-darwin-aarch64", - "/jdk/amazon-corretto-8.362.08.1-linux", - "/jdk/amazon-corretto-8.362.08.1-linux-aarch64", - "/jdk/amazon-corretto-8.362.08.1-windows", - "/jdk/amazon-corretto-8.372.07.1-darwin", - "/jdk/amazon-corretto-8.372.07.1-darwin-aarch64", - "/jdk/amazon-corretto-8.372.07.1-linux", - "/jdk/amazon-corretto-8.372.07.1-linux-aarch64", - "/jdk/amazon-corretto-8.372.07.1-windows", - "/jdk/amazon-corretto-8.382.05.1-darwin", - "/jdk/amazon-corretto-8.382.05.1-darwin-aarch64", - "/jdk/amazon-corretto-8.382.05.1-linux", - "/jdk/amazon-corretto-8.382.05.1-linux-aarch64", - "/jdk/amazon-corretto-8.382.05.1-windows", - "/jdk/amazon-corretto-8.392.08.1-darwin", - "/jdk/amazon-corretto-8.392.08.1-darwin-aarch64", - "/jdk/amazon-corretto-8.392.08.1-linux", - "/jdk/amazon-corretto-8.392.08.1-linux-aarch64", - "/jdk/amazon-corretto-8.392.08.1-windows", - "/jdk/amazon-corretto-8.402.06.1-linux", - "/jdk/amazon-corretto-8.402.06.1-linux-aarch64", - "/jdk/amazon-corretto-8.402.06.1-windows", - "/jdk/amazon-corretto-8.402.07.1-darwin", - "/jdk/amazon-corretto-8.402.07.1-darwin-aarch64", - "/jdk/amazon-corretto-8.402.08.1-darwin", - "/jdk/amazon-corretto-8.402.08.1-darwin-aarch64", - "/jdk/amazon-corretto-8.402.08.1-linux", - "/jdk/amazon-corretto-8.402.08.1-linux-aarch64", - "/jdk/amazon-corretto-8.402.08.1-windows", - "/jdk/amazon-corretto-8.412.08.1-darwin", - "/jdk/amazon-corretto-8.412.08.1-darwin-aarch64", - "/jdk/amazon-corretto-8.412.08.1-linux", - "/jdk/amazon-corretto-8.412.08.1-linux-aarch64", - "/jdk/amazon-corretto-8.412.08.1-windows", - "/jdk/graalvm-ce-11-19.3.0-darwin", - "/jdk/graalvm-ce-11-19.3.0-linux", - 
"/jdk/graalvm-ce-11-19.3.0-linux-aarch64", - "/jdk/graalvm-ce-11-19.3.0-windows", - "/jdk/graalvm-ce-11-19.3.0.2-darwin", - "/jdk/graalvm-ce-11-19.3.0.2-linux", - "/jdk/graalvm-ce-11-19.3.0.2-linux-aarch64", - "/jdk/graalvm-ce-11-19.3.0.2-windows", - "/jdk/graalvm-ce-11-19.3.1-darwin", - "/jdk/graalvm-ce-11-19.3.1-linux", - "/jdk/graalvm-ce-11-19.3.1-linux-aarch64", - "/jdk/graalvm-ce-11-19.3.1-windows", - "/jdk/graalvm-ce-11-19.3.2-darwin", - "/jdk/graalvm-ce-11-19.3.2-linux", - "/jdk/graalvm-ce-11-19.3.2-linux-aarch64", - "/jdk/graalvm-ce-11-19.3.2-windows", - "/jdk/graalvm-ce-11-19.3.3-darwin", - "/jdk/graalvm-ce-11-19.3.3-linux", - "/jdk/graalvm-ce-11-19.3.3-linux-aarch64", - "/jdk/graalvm-ce-11-19.3.3-windows", - "/jdk/graalvm-ce-11-19.3.4-darwin", - "/jdk/graalvm-ce-11-19.3.4-linux", - "/jdk/graalvm-ce-11-19.3.4-linux-aarch64", - "/jdk/graalvm-ce-11-19.3.4-windows", - "/jdk/graalvm-ce-11-19.3.5-darwin", - "/jdk/graalvm-ce-11-19.3.5-linux", - "/jdk/graalvm-ce-11-19.3.5-linux-aarch64", - "/jdk/graalvm-ce-11-19.3.5-windows", - "/jdk/graalvm-ce-11-19.3.6-darwin", - "/jdk/graalvm-ce-11-19.3.6-linux", - "/jdk/graalvm-ce-11-19.3.6-windows", - "/jdk/graalvm-ce-11-20.0.0-darwin", - "/jdk/graalvm-ce-11-20.0.0-linux", - "/jdk/graalvm-ce-11-20.0.0-linux-aarch64", - "/jdk/graalvm-ce-11-20.0.0-windows", - "/jdk/graalvm-ce-11-20.1.0-darwin", - "/jdk/graalvm-ce-11-20.1.0-linux", - "/jdk/graalvm-ce-11-20.1.0-linux-aarch64", - "/jdk/graalvm-ce-11-20.1.0-windows", - "/jdk/graalvm-ce-11-20.2.0-darwin", - "/jdk/graalvm-ce-11-20.2.0-linux", - "/jdk/graalvm-ce-11-20.2.0-linux-aarch64", - "/jdk/graalvm-ce-11-20.2.0-windows", - "/jdk/graalvm-ce-11-20.3.0-darwin", - "/jdk/graalvm-ce-11-20.3.0-linux", - "/jdk/graalvm-ce-11-20.3.0-linux-aarch64", - "/jdk/graalvm-ce-11-20.3.0-windows", - "/jdk/graalvm-ce-11-20.3.1-darwin", - "/jdk/graalvm-ce-11-20.3.1-linux", - "/jdk/graalvm-ce-11-20.3.1-linux-aarch64", - "/jdk/graalvm-ce-11-20.3.1-windows", - "/jdk/graalvm-ce-11-20.3.1.2-darwin", - "/jdk/graalvm-ce-11-20.3.1.2-linux", - "/jdk/graalvm-ce-11-20.3.1.2-linux-aarch64", - "/jdk/graalvm-ce-11-20.3.1.2-windows", - "/jdk/graalvm-ce-11-20.3.2-darwin", - "/jdk/graalvm-ce-11-20.3.2-linux", - "/jdk/graalvm-ce-11-20.3.2-windows", - "/jdk/graalvm-ce-11-20.3.3-darwin", - "/jdk/graalvm-ce-11-20.3.3-linux", - "/jdk/graalvm-ce-11-20.3.3-windows", - "/jdk/graalvm-ce-11-20.3.4-darwin", - "/jdk/graalvm-ce-11-20.3.4-linux", - "/jdk/graalvm-ce-11-20.3.4-windows", - "/jdk/graalvm-ce-11-20.3.5-darwin", - "/jdk/graalvm-ce-11-20.3.5-linux", - "/jdk/graalvm-ce-11-20.3.5-windows", - "/jdk/graalvm-ce-11-20.3.6-darwin", - "/jdk/graalvm-ce-11-20.3.6-linux", - "/jdk/graalvm-ce-11-20.3.6-windows", - "/jdk/graalvm-ce-11-21.0.0-darwin", - "/jdk/graalvm-ce-11-21.0.0-linux", - "/jdk/graalvm-ce-11-21.0.0-linux-aarch64", - "/jdk/graalvm-ce-11-21.0.0-windows", - "/jdk/graalvm-ce-11-21.0.0.2-darwin", - "/jdk/graalvm-ce-11-21.0.0.2-linux", - "/jdk/graalvm-ce-11-21.0.0.2-linux-aarch64", - "/jdk/graalvm-ce-11-21.0.0.2-windows", - "/jdk/graalvm-ce-11-21.1.0-darwin", - "/jdk/graalvm-ce-11-21.1.0-linux", - "/jdk/graalvm-ce-11-21.1.0-linux-aarch64", - "/jdk/graalvm-ce-11-21.1.0-windows", - "/jdk/graalvm-ce-11-21.2.0-darwin", - "/jdk/graalvm-ce-11-21.2.0-linux", - "/jdk/graalvm-ce-11-21.2.0-linux-aarch64", - "/jdk/graalvm-ce-11-21.2.0-windows", - "/jdk/graalvm-ce-11-21.3.0-darwin", - "/jdk/graalvm-ce-11-21.3.0-linux", - "/jdk/graalvm-ce-11-21.3.0-linux-aarch64", - "/jdk/graalvm-ce-11-21.3.0-windows", - "/jdk/graalvm-ce-11-21.3.1-darwin", - 
"/jdk/graalvm-ce-11-21.3.1-linux", - "/jdk/graalvm-ce-11-21.3.1-linux-aarch64", - "/jdk/graalvm-ce-11-21.3.1-windows", - "/jdk/graalvm-ce-11-21.3.2-darwin", - "/jdk/graalvm-ce-11-21.3.2-linux", - "/jdk/graalvm-ce-11-21.3.2-linux-aarch64", - "/jdk/graalvm-ce-11-21.3.2-windows", - "/jdk/graalvm-ce-11-21.3.3-darwin", - "/jdk/graalvm-ce-11-21.3.3-linux", - "/jdk/graalvm-ce-11-21.3.3-linux-aarch64", - "/jdk/graalvm-ce-11-21.3.3-windows", - "/jdk/graalvm-ce-11-21.3.3.1-darwin", - "/jdk/graalvm-ce-11-21.3.3.1-linux", - "/jdk/graalvm-ce-11-21.3.3.1-linux-aarch64", - "/jdk/graalvm-ce-11-21.3.3.1-windows", - "/jdk/graalvm-ce-11-22.0.0.2-darwin", - "/jdk/graalvm-ce-11-22.0.0.2-linux", - "/jdk/graalvm-ce-11-22.0.0.2-linux-aarch64", - "/jdk/graalvm-ce-11-22.0.0.2-windows", - "/jdk/graalvm-ce-11-22.1.0-darwin", - "/jdk/graalvm-ce-11-22.1.0-darwin-aarch64", - "/jdk/graalvm-ce-11-22.1.0-linux", - "/jdk/graalvm-ce-11-22.1.0-linux-aarch64", - "/jdk/graalvm-ce-11-22.1.0-windows", - "/jdk/graalvm-ce-11-22.2.0-darwin", - "/jdk/graalvm-ce-11-22.2.0-darwin-aarch64", - "/jdk/graalvm-ce-11-22.2.0-linux", - "/jdk/graalvm-ce-11-22.2.0-linux-aarch64", - "/jdk/graalvm-ce-11-22.2.0-windows", - "/jdk/graalvm-ce-11-22.3.0-darwin", - "/jdk/graalvm-ce-11-22.3.0-darwin-aarch64", - "/jdk/graalvm-ce-11-22.3.0-linux", - "/jdk/graalvm-ce-11-22.3.0-linux-aarch64", - "/jdk/graalvm-ce-11-22.3.0-windows", - "/jdk/graalvm-ce-11-22.3.1-darwin", - "/jdk/graalvm-ce-11-22.3.1-darwin-aarch64", - "/jdk/graalvm-ce-11-22.3.1-linux", - "/jdk/graalvm-ce-11-22.3.1-linux-aarch64", - "/jdk/graalvm-ce-11-22.3.1-windows", - "/jdk/graalvm-ce-11-22.3.2-darwin", - "/jdk/graalvm-ce-11-22.3.2-linux", - "/jdk/graalvm-ce-11-22.3.2-linux-aarch64", - "/jdk/graalvm-ce-11-22.3.2-windows", - "/jdk/graalvm-ce-11-22.3.3-darwin", - "/jdk/graalvm-ce-11-22.3.3-linux", - "/jdk/graalvm-ce-11-22.3.3-linux-aarch64", - "/jdk/graalvm-ce-11-22.3.3-windows", - "/jdk/graalvm-ce-17-21.3.0-darwin", - "/jdk/graalvm-ce-17-21.3.0-linux", - "/jdk/graalvm-ce-17-21.3.0-linux-aarch64", - "/jdk/graalvm-ce-17-21.3.0-windows", - "/jdk/graalvm-ce-17-21.3.1-darwin", - "/jdk/graalvm-ce-17-21.3.1-linux", - "/jdk/graalvm-ce-17-21.3.1-linux-aarch64", - "/jdk/graalvm-ce-17-21.3.1-windows", - "/jdk/graalvm-ce-17-21.3.2-darwin", - "/jdk/graalvm-ce-17-21.3.2-linux", - "/jdk/graalvm-ce-17-21.3.2-linux-aarch64", - "/jdk/graalvm-ce-17-21.3.2-windows", - "/jdk/graalvm-ce-17-21.3.3-darwin", - "/jdk/graalvm-ce-17-21.3.3-linux", - "/jdk/graalvm-ce-17-21.3.3-linux-aarch64", - "/jdk/graalvm-ce-17-21.3.3-windows", - "/jdk/graalvm-ce-17-21.3.3.1-darwin", - "/jdk/graalvm-ce-17-21.3.3.1-linux", - "/jdk/graalvm-ce-17-21.3.3.1-linux-aarch64", - "/jdk/graalvm-ce-17-21.3.3.1-windows", - "/jdk/graalvm-ce-17-22.0.0.2-darwin", - "/jdk/graalvm-ce-17-22.0.0.2-linux", - "/jdk/graalvm-ce-17-22.0.0.2-linux-aarch64", - "/jdk/graalvm-ce-17-22.0.0.2-windows", - "/jdk/graalvm-ce-17-22.1.0-darwin", - "/jdk/graalvm-ce-17-22.1.0-darwin-aarch64", - "/jdk/graalvm-ce-17-22.1.0-linux", - "/jdk/graalvm-ce-17-22.1.0-linux-aarch64", - "/jdk/graalvm-ce-17-22.1.0-windows", - "/jdk/graalvm-ce-17-22.2.0-darwin", - "/jdk/graalvm-ce-17-22.2.0-darwin-aarch64", - "/jdk/graalvm-ce-17-22.2.0-linux", - "/jdk/graalvm-ce-17-22.2.0-linux-aarch64", - "/jdk/graalvm-ce-17-22.2.0-windows", - "/jdk/graalvm-ce-17-22.3.0-darwin", - "/jdk/graalvm-ce-17-22.3.0-darwin-aarch64", - "/jdk/graalvm-ce-17-22.3.0-linux", - "/jdk/graalvm-ce-17-22.3.0-linux-aarch64", - "/jdk/graalvm-ce-17-22.3.0-windows", - "/jdk/graalvm-ce-17-22.3.1-darwin", - 
"/jdk/graalvm-ce-17-22.3.1-darwin-aarch64", - "/jdk/graalvm-ce-17-22.3.1-linux", - "/jdk/graalvm-ce-17-22.3.1-linux-aarch64", - "/jdk/graalvm-ce-17-22.3.1-windows", - "/jdk/graalvm-ce-17-22.3.2-darwin", - "/jdk/graalvm-ce-17-22.3.2-linux", - "/jdk/graalvm-ce-17-22.3.2-linux-aarch64", - "/jdk/graalvm-ce-17-22.3.2-windows", - "/jdk/graalvm-ce-17-22.3.3-darwin", - "/jdk/graalvm-ce-17-22.3.3-linux", - "/jdk/graalvm-ce-17-22.3.3-linux-aarch64", - "/jdk/graalvm-ce-17-22.3.3-windows", - "/jdk/ibm-java-8.0-5.6-linux-x86_64", - "/jdk/ibm-java-8.0-6.25-linux-x86_64", - "/jdk/jdk-10-ea+35_darwin-x64", - "/jdk/jdk-10-ea+35_linux-x64", - "/jdk/jdk-10-ea+35_windows-x64", - "/jdk/jdk-10-ea+37_darwin-x64", - "/jdk/jdk-10-ea+37_linux-x64", - "/jdk/jdk-10-ea+37_windows-x64", - "/jdk/jdk-10-ea+42_darwin-x64", - "/jdk/jdk-10-ea+42_linux-x64", - "/jdk/jdk-10-ea+42_windows-x64", - "/jdk/jdk-8u101-linux-x64", - "/jdk/jdk-8u101-windows-x64", - "/jdk/jdk-8u102-linux-x64", - "/jdk/jdk-8u102-windows-x64", - "/jdk/jdk-8u121-linux-x64", - "/jdk/jdk-8u121-windows-x64", - "/jdk/jdk-8u131-linux-x64", - "/jdk/jdk-8u131-windows-x64", - "/jdk/jdk-8u141-darwin-x64", - "/jdk/jdk-8u141-linux-x64", - "/jdk/jdk-8u141-windows-x64", - "/jdk/jdk-8u144-darwin-x64", - "/jdk/jdk-8u144-linux-x64", - "/jdk/jdk-8u144-windows-x64", - "/jdk/jdk-8u151-darwin-x64", - "/jdk/jdk-8u151-linux-x64", - "/jdk/jdk-8u151-windows-x64", - "/jdk/jdk-8u152-darwin-x64", - "/jdk/jdk-8u152-linux-x64", - "/jdk/jdk-8u152-windows-x64", - "/jdk/jdk-8u161-darwin-x64", - "/jdk/jdk-8u161-linux-x64", - "/jdk/jdk-8u161-windows-x64", - "/jdk/jdk-8u162-darwin-x64", - "/jdk/jdk-8u162-linux-x64", - "/jdk/jdk-8u162-windows-x64", - "/jdk/jdk-8u20-linux-x64", - "/jdk/jdk-8u20-windows-x64", - "/jdk/jdk-8u45-linux-x64", - "/jdk/jdk-8u45-windows-x64", - "/jdk/jdk-9-ea+129_linux-x64", - "/jdk/jdk-9-ea+129_windows-x64", - "/jdk/jdk-9-ea+130_linux-x64", - "/jdk/jdk-9-ea+130_windows-x64", - "/jdk/jdk-9-ea+135_linux-x64", - "/jdk/jdk-9-ea+135_windows-x64", - "/jdk/jdk-9-ea+140_linux-x64", - "/jdk/jdk-9-ea+140_windows-x64", - "/jdk/jdk-9-ea+177_linux-x64", - "/jdk/jdk-9-ea+177_windows-x64", - "/jdk/jdk-9-ea+178_linux-x64", - "/jdk/jdk-9-ea+178_windows-x64", - "/jdk/jdk-9-ea+179_darwin-x64", - "/jdk/jdk-9-ea+179_linux-x64", - "/jdk/jdk-9-ea+179_windows-x64", - "/jdk/jdk-9-ea+180_darwin-x64", - "/jdk/jdk-9-ea+180_linux-x64", - "/jdk/jdk-9-ea+180_windows-x64", - "/jdk/jdk-9-ea+181_darwin-x64", - "/jdk/jdk-9-ea+181_linux-x64", - "/jdk/jdk-9-ea+181_windows-x64", - "/jdk/jdk-9.0.1+11_darwin-x64", - "/jdk/jdk-9.0.1+11_linux-x64", - "/jdk/jdk-9.0.1+11_windows-x64", - "/jdk/jdk-9.0.4+11_darwin-x64", - "/jdk/jdk-9.0.4+11_linux-x64", - "/jdk/jdk-9.0.4+11_windows-x64", - "/jdk/openjdk-10+43-darwin", - "/jdk/openjdk-10+43-linux", - "/jdk/openjdk-10+43-windows", - "/jdk/openjdk-10-darwin", - "/jdk/openjdk-10-linux", - "/jdk/openjdk-10-windows", - "/jdk/openjdk-10.0.1-darwin", - "/jdk/openjdk-10.0.1-linux", - "/jdk/openjdk-10.0.1-windows", - "/jdk/openjdk-10.0.2-darwin", - "/jdk/openjdk-10.0.2-linux", - "/jdk/openjdk-10.0.2-windows", - "/jdk/openjdk-11+11-darwin", - "/jdk/openjdk-11+11-linux", - "/jdk/openjdk-11+11-windows", - "/jdk/openjdk-11+12-darwin", - "/jdk/openjdk-11+12-linux", - "/jdk/openjdk-11+12-windows", - "/jdk/openjdk-11+13-darwin", - "/jdk/openjdk-11+13-linux", - "/jdk/openjdk-11+13-windows", - "/jdk/openjdk-11+14-darwin", - "/jdk/openjdk-11+14-linux", - "/jdk/openjdk-11+14-windows", - "/jdk/openjdk-11+15-darwin", - "/jdk/openjdk-11+15-linux", - "/jdk/openjdk-11+15-windows", - 
"/jdk/openjdk-11+16-darwin", - "/jdk/openjdk-11+16-linux", - "/jdk/openjdk-11+16-windows", - "/jdk/openjdk-11+17-darwin", - "/jdk/openjdk-11+17-linux", - "/jdk/openjdk-11+17-windows", - "/jdk/openjdk-11+18-darwin", - "/jdk/openjdk-11+18-linux", - "/jdk/openjdk-11+18-windows", - "/jdk/openjdk-11+19-darwin", - "/jdk/openjdk-11+19-linux", - "/jdk/openjdk-11+19-windows", - "/jdk/openjdk-11+20-darwin", - "/jdk/openjdk-11+20-linux", - "/jdk/openjdk-11+20-windows", - "/jdk/openjdk-11+21-darwin", - "/jdk/openjdk-11+21-linux", - "/jdk/openjdk-11+21-windows", - "/jdk/openjdk-11+22-darwin", - "/jdk/openjdk-11+22-linux", - "/jdk/openjdk-11+22-windows", - "/jdk/openjdk-11+23-darwin", - "/jdk/openjdk-11+23-linux", - "/jdk/openjdk-11+23-windows", - "/jdk/openjdk-11+24-darwin", - "/jdk/openjdk-11+24-linux", - "/jdk/openjdk-11+24-windows", - "/jdk/openjdk-11+25-darwin", - "/jdk/openjdk-11+25-linux", - "/jdk/openjdk-11+25-windows", - "/jdk/openjdk-11+26-darwin", - "/jdk/openjdk-11+26-linux", - "/jdk/openjdk-11+26-windows", - "/jdk/openjdk-11+27-darwin", - "/jdk/openjdk-11+27-linux", - "/jdk/openjdk-11+27-windows", - "/jdk/openjdk-11+28-darwin", - "/jdk/openjdk-11+28-linux", - "/jdk/openjdk-11+28-windows", - "/jdk/openjdk-11+5-darwin", - "/jdk/openjdk-11+5-linux", - "/jdk/openjdk-11+5-windows", - "/jdk/openjdk-11-darwin", - "/jdk/openjdk-11-linux", - "/jdk/openjdk-11-windows", - "/jdk/openjdk-11.0.1-darwin", - "/jdk/openjdk-11.0.1-linux", - "/jdk/openjdk-11.0.1-windows", - "/jdk/openjdk-11.0.2-darwin", - "/jdk/openjdk-11.0.2-linux", - "/jdk/openjdk-11.0.2-windows", - "/jdk/openjdk-12+23-darwin", - "/jdk/openjdk-12+23-linux", - "/jdk/openjdk-12+23-windows", - "/jdk/openjdk-12+24-darwin", - "/jdk/openjdk-12+24-linux", - "/jdk/openjdk-12+24-windows", - "/jdk/openjdk-12+25-darwin", - "/jdk/openjdk-12+25-linux", - "/jdk/openjdk-12+25-windows", - "/jdk/openjdk-12+27-darwin", - "/jdk/openjdk-12+27-linux", - "/jdk/openjdk-12+27-windows", - "/jdk/openjdk-12+28-darwin", - "/jdk/openjdk-12+28-linux", - "/jdk/openjdk-12+28-windows", - "/jdk/openjdk-12+29-darwin", - "/jdk/openjdk-12+29-linux", - "/jdk/openjdk-12+29-windows", - "/jdk/openjdk-12+30-darwin", - "/jdk/openjdk-12+30-linux", - "/jdk/openjdk-12+30-windows", - "/jdk/openjdk-12+31-darwin", - "/jdk/openjdk-12+31-linux", - "/jdk/openjdk-12+31-windows", - "/jdk/openjdk-12+32-darwin", - "/jdk/openjdk-12+32-linux", - "/jdk/openjdk-12+32-windows", - "/jdk/openjdk-12+33-darwin", - "/jdk/openjdk-12+33-linux", - "/jdk/openjdk-12+33-windows", - "/jdk/openjdk-12-darwin", - "/jdk/openjdk-12-linux", - "/jdk/openjdk-12-windows", - "/jdk/openjdk-12.0.1-darwin", - "/jdk/openjdk-12.0.1-linux", - "/jdk/openjdk-12.0.1-windows", - "/jdk/openjdk-12.0.2-darwin", - "/jdk/openjdk-12.0.2-linux", - "/jdk/openjdk-12.0.2-windows", - "/jdk/openjdk-13+14-darwin", - "/jdk/openjdk-13+14-linux", - "/jdk/openjdk-13+14-windows", - "/jdk/openjdk-13+15-darwin", - "/jdk/openjdk-13+15-linux", - "/jdk/openjdk-13+15-windows", - "/jdk/openjdk-13+16-darwin", - "/jdk/openjdk-13+16-linux", - "/jdk/openjdk-13+16-windows", - "/jdk/openjdk-13+17-darwin", - "/jdk/openjdk-13+17-linux", - "/jdk/openjdk-13+17-windows", - "/jdk/openjdk-13+18-darwin", - "/jdk/openjdk-13+18-linux", - "/jdk/openjdk-13+18-windows", - "/jdk/openjdk-13+19-darwin", - "/jdk/openjdk-13+19-linux", - "/jdk/openjdk-13+19-windows", - "/jdk/openjdk-13+20-darwin", - "/jdk/openjdk-13+20-linux", - "/jdk/openjdk-13+20-windows", - "/jdk/openjdk-13+21-darwin", - "/jdk/openjdk-13+21-linux", - "/jdk/openjdk-13+21-windows", - "/jdk/openjdk-13+22-darwin", 
- "/jdk/openjdk-13+22-linux", - "/jdk/openjdk-13+22-windows", - "/jdk/openjdk-13+23-darwin", - "/jdk/openjdk-13+23-linux", - "/jdk/openjdk-13+23-windows", - "/jdk/openjdk-13+24-darwin", - "/jdk/openjdk-13+24-linux", - "/jdk/openjdk-13+24-windows", - "/jdk/openjdk-13+25-darwin", - "/jdk/openjdk-13+25-linux", - "/jdk/openjdk-13+25-windows", - "/jdk/openjdk-13+26-darwin", - "/jdk/openjdk-13+26-linux", - "/jdk/openjdk-13+26-windows", - "/jdk/openjdk-13+27-darwin", - "/jdk/openjdk-13+27-linux", - "/jdk/openjdk-13+27-windows", - "/jdk/openjdk-13+28-darwin", - "/jdk/openjdk-13+28-linux", - "/jdk/openjdk-13+28-windows", - "/jdk/openjdk-13+29-darwin", - "/jdk/openjdk-13+29-linux", - "/jdk/openjdk-13+29-windows", - "/jdk/openjdk-13+30-darwin", - "/jdk/openjdk-13+30-linux", - "/jdk/openjdk-13+30-windows", - "/jdk/openjdk-13+31-darwin", - "/jdk/openjdk-13+31-linux", - "/jdk/openjdk-13+31-windows", - "/jdk/openjdk-13+32-darwin", - "/jdk/openjdk-13+32-linux", - "/jdk/openjdk-13+32-windows", - "/jdk/openjdk-13-darwin", - "/jdk/openjdk-13-linux", - "/jdk/openjdk-13-windows", - "/jdk/openjdk-13.0.1-darwin", - "/jdk/openjdk-13.0.1-linux", - "/jdk/openjdk-13.0.1-windows", - "/jdk/openjdk-13.0.2-darwin", - "/jdk/openjdk-13.0.2-linux", - "/jdk/openjdk-13.0.2-windows", - "/jdk/openjdk-14+10-darwin", - "/jdk/openjdk-14+10-linux", - "/jdk/openjdk-14+10-windows", - "/jdk/openjdk-14+11-darwin", - "/jdk/openjdk-14+11-linux", - "/jdk/openjdk-14+11-windows", - "/jdk/openjdk-14+12-darwin", - "/jdk/openjdk-14+12-linux", - "/jdk/openjdk-14+12-windows", - "/jdk/openjdk-14+13-darwin", - "/jdk/openjdk-14+13-linux", - "/jdk/openjdk-14+13-windows", - "/jdk/openjdk-14+14-darwin", - "/jdk/openjdk-14+14-linux", - "/jdk/openjdk-14+14-windows", - "/jdk/openjdk-14+15-darwin", - "/jdk/openjdk-14+15-linux", - "/jdk/openjdk-14+15-windows", - "/jdk/openjdk-14+16-darwin", - "/jdk/openjdk-14+16-linux", - "/jdk/openjdk-14+16-windows", - "/jdk/openjdk-14+17-darwin", - "/jdk/openjdk-14+17-linux", - "/jdk/openjdk-14+17-windows", - "/jdk/openjdk-14+25-darwin", - "/jdk/openjdk-14+25-linux", - "/jdk/openjdk-14+25-windows", - "/jdk/openjdk-14+26-darwin", - "/jdk/openjdk-14+26-linux", - "/jdk/openjdk-14+26-windows", - "/jdk/openjdk-14+27-darwin", - "/jdk/openjdk-14+27-linux", - "/jdk/openjdk-14+27-windows", - "/jdk/openjdk-14+28-darwin", - "/jdk/openjdk-14+28-linux", - "/jdk/openjdk-14+28-windows", - "/jdk/openjdk-14+30-darwin", - "/jdk/openjdk-14+30-linux", - "/jdk/openjdk-14+30-windows", - "/jdk/openjdk-14+31-darwin", - "/jdk/openjdk-14+31-linux", - "/jdk/openjdk-14+31-windows", - "/jdk/openjdk-14+32-darwin", - "/jdk/openjdk-14+32-linux", - "/jdk/openjdk-14+32-windows", - "/jdk/openjdk-14+33-darwin", - "/jdk/openjdk-14+33-linux", - "/jdk/openjdk-14+33-windows", - "/jdk/openjdk-14+34-darwin", - "/jdk/openjdk-14+34-linux", - "/jdk/openjdk-14+34-windows", - "/jdk/openjdk-14+9-darwin", - "/jdk/openjdk-14+9-linux", - "/jdk/openjdk-14+9-windows", - "/jdk/openjdk-14-darwin", - "/jdk/openjdk-14-linux", - "/jdk/openjdk-14-windows", - "/jdk/openjdk-14.0.1-darwin", - "/jdk/openjdk-14.0.1-linux", - "/jdk/openjdk-14.0.1-windows", - "/jdk/openjdk-14.0.2+12-darwin", - "/jdk/openjdk-14.0.2+12-linux", - "/jdk/openjdk-14.0.2+12-windows", - "/jdk/openjdk-14.0.2-darwin", - "/jdk/openjdk-14.0.2-linux", - "/jdk/openjdk-14.0.2-windows", - "/jdk/openjdk-15+10-darwin", - "/jdk/openjdk-15+10-linux", - "/jdk/openjdk-15+10-windows", - "/jdk/openjdk-15+11-darwin", - "/jdk/openjdk-15+11-linux", - "/jdk/openjdk-15+11-windows", - "/jdk/openjdk-15+12-darwin", - 
"/jdk/openjdk-15+12-linux", - "/jdk/openjdk-15+12-windows", - "/jdk/openjdk-15+13-darwin", - "/jdk/openjdk-15+13-linux", - "/jdk/openjdk-15+13-windows", - "/jdk/openjdk-15+14-darwin", - "/jdk/openjdk-15+14-linux", - "/jdk/openjdk-15+14-windows", - "/jdk/openjdk-15+15-darwin", - "/jdk/openjdk-15+15-linux", - "/jdk/openjdk-15+15-windows", - "/jdk/openjdk-15+16-darwin", - "/jdk/openjdk-15+16-linux", - "/jdk/openjdk-15+16-windows", - "/jdk/openjdk-15+17-darwin", - "/jdk/openjdk-15+17-linux", - "/jdk/openjdk-15+17-windows", - "/jdk/openjdk-15+18-darwin", - "/jdk/openjdk-15+18-linux", - "/jdk/openjdk-15+18-windows", - "/jdk/openjdk-15+19-darwin", - "/jdk/openjdk-15+19-linux", - "/jdk/openjdk-15+19-windows", - "/jdk/openjdk-15+20-darwin", - "/jdk/openjdk-15+20-linux", - "/jdk/openjdk-15+20-windows", - "/jdk/openjdk-15+21-darwin", - "/jdk/openjdk-15+21-linux", - "/jdk/openjdk-15+21-windows", - "/jdk/openjdk-15+22-darwin", - "/jdk/openjdk-15+22-linux", - "/jdk/openjdk-15+22-windows", - "/jdk/openjdk-15+23-darwin", - "/jdk/openjdk-15+23-linux", - "/jdk/openjdk-15+23-windows", - "/jdk/openjdk-15+24-darwin", - "/jdk/openjdk-15+24-linux", - "/jdk/openjdk-15+24-windows", - "/jdk/openjdk-15+25-darwin", - "/jdk/openjdk-15+25-linux", - "/jdk/openjdk-15+25-windows", - "/jdk/openjdk-15+26-darwin", - "/jdk/openjdk-15+26-linux", - "/jdk/openjdk-15+26-windows", - "/jdk/openjdk-15+27-darwin", - "/jdk/openjdk-15+27-linux", - "/jdk/openjdk-15+27-windows", - "/jdk/openjdk-15+28-darwin", - "/jdk/openjdk-15+28-linux", - "/jdk/openjdk-15+28-windows", - "/jdk/openjdk-15+29-darwin", - "/jdk/openjdk-15+29-linux", - "/jdk/openjdk-15+29-windows", - "/jdk/openjdk-15+30-darwin", - "/jdk/openjdk-15+30-linux", - "/jdk/openjdk-15+30-windows", - "/jdk/openjdk-15+31-darwin", - "/jdk/openjdk-15+31-linux", - "/jdk/openjdk-15+31-windows", - "/jdk/openjdk-15+32-darwin", - "/jdk/openjdk-15+32-linux", - "/jdk/openjdk-15+32-windows", - "/jdk/openjdk-15+33-darwin", - "/jdk/openjdk-15+33-linux", - "/jdk/openjdk-15+33-windows", - "/jdk/openjdk-15+34-darwin", - "/jdk/openjdk-15+34-linux", - "/jdk/openjdk-15+34-windows", - "/jdk/openjdk-15+36-darwin", - "/jdk/openjdk-15+36-linux", - "/jdk/openjdk-15+36-windows", - "/jdk/openjdk-15+4-darwin", - "/jdk/openjdk-15+4-linux", - "/jdk/openjdk-15+4-windows", - "/jdk/openjdk-15+5-darwin", - "/jdk/openjdk-15+5-linux", - "/jdk/openjdk-15+5-windows", - "/jdk/openjdk-15+6-darwin", - "/jdk/openjdk-15+6-linux", - "/jdk/openjdk-15+6-windows", - "/jdk/openjdk-15+7-darwin", - "/jdk/openjdk-15+7-linux", - "/jdk/openjdk-15+7-windows", - "/jdk/openjdk-15+8-darwin", - "/jdk/openjdk-15+8-linux", - "/jdk/openjdk-15+8-windows", - "/jdk/openjdk-15+9-darwin", - "/jdk/openjdk-15+9-linux", - "/jdk/openjdk-15+9-windows", - "/jdk/openjdk-15-darwin", - "/jdk/openjdk-15-linux", - "/jdk/openjdk-15-windows", - "/jdk/openjdk-15.0.1+9-darwin", - "/jdk/openjdk-15.0.1+9-linux", - "/jdk/openjdk-15.0.1+9-windows", - "/jdk/openjdk-15.0.2+7-darwin", - "/jdk/openjdk-15.0.2+7-linux", - "/jdk/openjdk-15.0.2+7-linux-aarch64", - "/jdk/openjdk-15.0.2+7-windows", - "/jdk/openjdk-16+28-darwin", - "/jdk/openjdk-16+28-linux", - "/jdk/openjdk-16+28-windows", - "/jdk/openjdk-16+29-darwin", - "/jdk/openjdk-16+29-linux", - "/jdk/openjdk-16+29-windows", - "/jdk/openjdk-16+30-darwin", - "/jdk/openjdk-16+30-linux", - "/jdk/openjdk-16+30-windows", - "/jdk/openjdk-16+31-darwin", - "/jdk/openjdk-16+31-linux", - "/jdk/openjdk-16+31-windows", - "/jdk/openjdk-16+32-darwin", - "/jdk/openjdk-16+32-linux", - "/jdk/openjdk-16+32-windows", - 
"/jdk/openjdk-16+33-darwin", - "/jdk/openjdk-16+33-linux", - "/jdk/openjdk-16+33-windows", - "/jdk/openjdk-16+34-darwin", - "/jdk/openjdk-16+34-linux", - "/jdk/openjdk-16+34-windows", - "/jdk/openjdk-16+35-darwin", - "/jdk/openjdk-16+35-linux", - "/jdk/openjdk-16+35-windows", - "/jdk/openjdk-16+36-darwin", - "/jdk/openjdk-16+36-linux", - "/jdk/openjdk-16+36-linux-aarch64", - "/jdk/openjdk-16+36-windows", - "/jdk/openjdk-16.0.1+9-darwin", - "/jdk/openjdk-16.0.1+9-linux", - "/jdk/openjdk-16.0.1+9-linux-aarch64", - "/jdk/openjdk-16.0.1+9-windows", - "/jdk/openjdk-16.0.2+7-darwin", - "/jdk/openjdk-16.0.2+7-linux", - "/jdk/openjdk-16.0.2+7-linux-aarch64", - "/jdk/openjdk-16.0.2+7-windows", - "/jdk/openjdk-17+17-darwin", - "/jdk/openjdk-17+17-linux", - "/jdk/openjdk-17+17-linux-aarch64", - "/jdk/openjdk-17+17-windows", - "/jdk/openjdk-17+18-darwin", - "/jdk/openjdk-17+18-linux", - "/jdk/openjdk-17+18-linux-aarch64", - "/jdk/openjdk-17+18-windows", - "/jdk/openjdk-17+19-darwin", - "/jdk/openjdk-17+19-linux", - "/jdk/openjdk-17+19-linux-aarch64", - "/jdk/openjdk-17+19-windows", - "/jdk/openjdk-17+20-darwin", - "/jdk/openjdk-17+20-linux", - "/jdk/openjdk-17+20-linux-aarch64", - "/jdk/openjdk-17+20-windows", - "/jdk/openjdk-17+21-linux", - "/jdk/openjdk-17+21-linux-aarch64", - "/jdk/openjdk-17+21-windows", - "/jdk/openjdk-17+22-linux", - "/jdk/openjdk-17+22-linux-aarch64", - "/jdk/openjdk-17+22-windows", - "/jdk/openjdk-17+23-linux", - "/jdk/openjdk-17+23-linux-aarch64", - "/jdk/openjdk-17+23-windows", - "/jdk/openjdk-17+24-linux", - "/jdk/openjdk-17+24-linux-aarch64", - "/jdk/openjdk-17+24-windows", - "/jdk/openjdk-17+25-linux", - "/jdk/openjdk-17+25-linux-aarch64", - "/jdk/openjdk-17+25-windows", - "/jdk/openjdk-17+26-linux", - "/jdk/openjdk-17+26-linux-aarch64", - "/jdk/openjdk-17+26-windows", - "/jdk/openjdk-17+27-linux", - "/jdk/openjdk-17+27-linux-aarch64", - "/jdk/openjdk-17+27-windows", - "/jdk/openjdk-17+28-linux", - "/jdk/openjdk-17+28-linux-aarch64", - "/jdk/openjdk-17+28-windows", - "/jdk/openjdk-17+29-linux", - "/jdk/openjdk-17+29-linux-aarch64", - "/jdk/openjdk-17+29-windows", - "/jdk/openjdk-17+30-linux", - "/jdk/openjdk-17+30-linux-aarch64", - "/jdk/openjdk-17+30-windows", - "/jdk/openjdk-17+31-linux", - "/jdk/openjdk-17+31-linux-aarch64", - "/jdk/openjdk-17+31-windows", - "/jdk/openjdk-17+32-linux", - "/jdk/openjdk-17+32-linux-aarch64", - "/jdk/openjdk-17+32-windows", - "/jdk/openjdk-17+33-linux", - "/jdk/openjdk-17+33-linux-aarch64", - "/jdk/openjdk-17+33-windows", - "/jdk/openjdk-17+35-linux", - "/jdk/openjdk-17+35-linux-aarch64", - "/jdk/openjdk-17+35-windows", - "/jdk/openjdk-17.0.1+12-darwin", - "/jdk/openjdk-17.0.1+12-darwin-aarch64", - "/jdk/openjdk-17.0.1+12-linux", - "/jdk/openjdk-17.0.1+12-linux-aarch64", - "/jdk/openjdk-17.0.1+12-windows", - "/jdk/openjdk-17.0.2+8-darwin", - "/jdk/openjdk-17.0.2+8-darwin-aarch64", - "/jdk/openjdk-17.0.2+8-linux", - "/jdk/openjdk-17.0.2+8-linux-aarch64", - "/jdk/openjdk-17.0.2+8-windows", - "/jdk/openjdk-18+22-darwin", - "/jdk/openjdk-18+22-darwin-aarch64", - "/jdk/openjdk-18+22-linux", - "/jdk/openjdk-18+22-linux-aarch64", - "/jdk/openjdk-18+22-windows", - "/jdk/openjdk-18+23-darwin", - "/jdk/openjdk-18+23-darwin-aarch64", - "/jdk/openjdk-18+23-linux", - "/jdk/openjdk-18+23-linux-aarch64", - "/jdk/openjdk-18+23-windows", - "/jdk/openjdk-18+24-darwin", - "/jdk/openjdk-18+24-darwin-aarch64", - "/jdk/openjdk-18+24-linux", - "/jdk/openjdk-18+24-linux-aarch64", - "/jdk/openjdk-18+24-windows", - "/jdk/openjdk-18+25-darwin", - 
"/jdk/openjdk-18+25-darwin-aarch64", - "/jdk/openjdk-18+25-linux", - "/jdk/openjdk-18+25-linux-aarch64", - "/jdk/openjdk-18+25-windows", - "/jdk/openjdk-18+26-darwin", - "/jdk/openjdk-18+26-darwin-aarch64", - "/jdk/openjdk-18+26-linux", - "/jdk/openjdk-18+26-linux-aarch64", - "/jdk/openjdk-18+26-windows", - "/jdk/openjdk-18+27-darwin", - "/jdk/openjdk-18+27-darwin-aarch64", - "/jdk/openjdk-18+27-linux", - "/jdk/openjdk-18+27-linux-aarch64", - "/jdk/openjdk-18+27-windows", - "/jdk/openjdk-18+28-darwin", - "/jdk/openjdk-18+28-darwin-aarch64", - "/jdk/openjdk-18+28-linux", - "/jdk/openjdk-18+28-linux-aarch64", - "/jdk/openjdk-18+28-windows", - "/jdk/openjdk-18+29-darwin", - "/jdk/openjdk-18+29-darwin-aarch64", - "/jdk/openjdk-18+29-linux", - "/jdk/openjdk-18+29-linux-aarch64", - "/jdk/openjdk-18+29-windows", - "/jdk/openjdk-18+30-darwin", - "/jdk/openjdk-18+30-darwin-aarch64", - "/jdk/openjdk-18+30-linux", - "/jdk/openjdk-18+30-linux-aarch64", - "/jdk/openjdk-18+30-windows", - "/jdk/openjdk-18+31-darwin", - "/jdk/openjdk-18+31-darwin-aarch64", - "/jdk/openjdk-18+31-linux", - "/jdk/openjdk-18+31-linux-aarch64", - "/jdk/openjdk-18+31-windows", - "/jdk/openjdk-18+33-darwin", - "/jdk/openjdk-18+33-darwin-aarch64", - "/jdk/openjdk-18+33-linux", - "/jdk/openjdk-18+33-linux-aarch64", - "/jdk/openjdk-18+33-windows", - "/jdk/openjdk-18+34-darwin", - "/jdk/openjdk-18+34-darwin-aarch64", - "/jdk/openjdk-18+34-linux", - "/jdk/openjdk-18+34-linux-aarch64", - "/jdk/openjdk-18+34-windows", - "/jdk/openjdk-18+35-darwin", - "/jdk/openjdk-18+35-darwin-aarch64", - "/jdk/openjdk-18+35-linux", - "/jdk/openjdk-18+35-linux-aarch64", - "/jdk/openjdk-18+35-windows", - "/jdk/openjdk-18+36-darwin", - "/jdk/openjdk-18+36-darwin-aarch64", - "/jdk/openjdk-18+36-linux", - "/jdk/openjdk-18+36-linux-aarch64", - "/jdk/openjdk-18+36-windows", - "/jdk/openjdk-18.0.1+10-darwin", - "/jdk/openjdk-18.0.1+10-darwin-aarch64", - "/jdk/openjdk-18.0.1+10-linux", - "/jdk/openjdk-18.0.1+10-linux-aarch64", - "/jdk/openjdk-18.0.1+10-windows", - "/jdk/openjdk-18.0.1.1+2-darwin", - "/jdk/openjdk-18.0.1.1+2-darwin-aarch64", - "/jdk/openjdk-18.0.1.1+2-linux", - "/jdk/openjdk-18.0.1.1+2-linux-aarch64", - "/jdk/openjdk-18.0.1.1+2-windows", - "/jdk/openjdk-18.0.2+9-darwin", - "/jdk/openjdk-18.0.2+9-darwin-aarch64", - "/jdk/openjdk-18.0.2+9-linux", - "/jdk/openjdk-18.0.2+9-linux-aarch64", - "/jdk/openjdk-18.0.2+9-windows", - "/jdk/openjdk-18.0.2.1+1-darwin", - "/jdk/openjdk-18.0.2.1+1-darwin-aarch64", - "/jdk/openjdk-18.0.2.1+1-linux", - "/jdk/openjdk-18.0.2.1+1-linux-aarch64", - "/jdk/openjdk-18.0.2.1+1-windows", - "/jdk/openjdk-19+14-darwin", - "/jdk/openjdk-19+14-darwin-aarch64", - "/jdk/openjdk-19+14-linux", - "/jdk/openjdk-19+14-linux-aarch64", - "/jdk/openjdk-19+14-windows", - "/jdk/openjdk-19+15-darwin", - "/jdk/openjdk-19+15-darwin-aarch64", - "/jdk/openjdk-19+15-linux", - "/jdk/openjdk-19+15-linux-aarch64", - "/jdk/openjdk-19+15-windows", - "/jdk/openjdk-19+16-darwin", - "/jdk/openjdk-19+16-darwin-aarch64", - "/jdk/openjdk-19+16-linux", - "/jdk/openjdk-19+16-linux-aarch64", - "/jdk/openjdk-19+16-windows", - "/jdk/openjdk-19+17-darwin", - "/jdk/openjdk-19+17-darwin-aarch64", - "/jdk/openjdk-19+17-linux", - "/jdk/openjdk-19+17-linux-aarch64", - "/jdk/openjdk-19+17-windows", - "/jdk/openjdk-19+18-darwin", - "/jdk/openjdk-19+18-darwin-aarch64", - "/jdk/openjdk-19+18-linux", - "/jdk/openjdk-19+18-linux-aarch64", - "/jdk/openjdk-19+18-windows", - "/jdk/openjdk-19+19-darwin", - "/jdk/openjdk-19+19-darwin-aarch64", - "/jdk/openjdk-19+19-linux", - 
"/jdk/openjdk-19+19-linux-aarch64", - "/jdk/openjdk-19+19-windows", - "/jdk/openjdk-19+20-darwin", - "/jdk/openjdk-19+20-darwin-aarch64", - "/jdk/openjdk-19+20-linux", - "/jdk/openjdk-19+20-linux-aarch64", - "/jdk/openjdk-19+20-windows", - "/jdk/openjdk-19+21-darwin", - "/jdk/openjdk-19+21-darwin-aarch64", - "/jdk/openjdk-19+21-linux", - "/jdk/openjdk-19+21-linux-aarch64", - "/jdk/openjdk-19+21-windows", - "/jdk/openjdk-19+22-darwin", - "/jdk/openjdk-19+22-darwin-aarch64", - "/jdk/openjdk-19+22-linux", - "/jdk/openjdk-19+22-linux-aarch64", - "/jdk/openjdk-19+22-windows", - "/jdk/openjdk-19+23-darwin", - "/jdk/openjdk-19+23-darwin-aarch64", - "/jdk/openjdk-19+23-linux", - "/jdk/openjdk-19+23-linux-aarch64", - "/jdk/openjdk-19+23-windows", - "/jdk/openjdk-19+24-darwin", - "/jdk/openjdk-19+24-darwin-aarch64", - "/jdk/openjdk-19+24-linux", - "/jdk/openjdk-19+24-linux-aarch64", - "/jdk/openjdk-19+24-windows", - "/jdk/openjdk-19+25-darwin", - "/jdk/openjdk-19+25-darwin-aarch64", - "/jdk/openjdk-19+25-linux", - "/jdk/openjdk-19+25-linux-aarch64", - "/jdk/openjdk-19+25-windows", - "/jdk/openjdk-19+26-darwin", - "/jdk/openjdk-19+26-darwin-aarch64", - "/jdk/openjdk-19+26-linux", - "/jdk/openjdk-19+26-linux-aarch64", - "/jdk/openjdk-19+26-windows", - "/jdk/openjdk-19+27-darwin", - "/jdk/openjdk-19+27-darwin-aarch64", - "/jdk/openjdk-19+27-linux", - "/jdk/openjdk-19+27-linux-aarch64", - "/jdk/openjdk-19+27-windows", - "/jdk/openjdk-19+28-darwin", - "/jdk/openjdk-19+28-darwin-aarch64", - "/jdk/openjdk-19+28-linux", - "/jdk/openjdk-19+28-linux-aarch64", - "/jdk/openjdk-19+28-windows", - "/jdk/openjdk-19+29-darwin", - "/jdk/openjdk-19+29-darwin-aarch64", - "/jdk/openjdk-19+29-linux", - "/jdk/openjdk-19+29-linux-aarch64", - "/jdk/openjdk-19+29-windows", - "/jdk/openjdk-19+30-darwin", - "/jdk/openjdk-19+30-darwin-aarch64", - "/jdk/openjdk-19+30-linux", - "/jdk/openjdk-19+30-linux-aarch64", - "/jdk/openjdk-19+30-windows", - "/jdk/openjdk-19+31-darwin", - "/jdk/openjdk-19+31-darwin-aarch64", - "/jdk/openjdk-19+31-linux", - "/jdk/openjdk-19+31-linux-aarch64", - "/jdk/openjdk-19+31-windows", - "/jdk/openjdk-19+32-darwin", - "/jdk/openjdk-19+32-darwin-aarch64", - "/jdk/openjdk-19+32-linux", - "/jdk/openjdk-19+32-linux-aarch64", - "/jdk/openjdk-19+32-windows", - "/jdk/openjdk-19+33-darwin", - "/jdk/openjdk-19+33-darwin-aarch64", - "/jdk/openjdk-19+33-linux", - "/jdk/openjdk-19+33-linux-aarch64", - "/jdk/openjdk-19+33-windows", - "/jdk/openjdk-19+34-darwin", - "/jdk/openjdk-19+34-darwin-aarch64", - "/jdk/openjdk-19+34-linux", - "/jdk/openjdk-19+34-linux-aarch64", - "/jdk/openjdk-19+34-windows", - "/jdk/openjdk-19+35-darwin", - "/jdk/openjdk-19+35-darwin-aarch64", - "/jdk/openjdk-19+35-linux", - "/jdk/openjdk-19+35-linux-aarch64", - "/jdk/openjdk-19+35-windows", - "/jdk/openjdk-19+36-darwin", - "/jdk/openjdk-19+36-darwin-aarch64", - "/jdk/openjdk-19+36-linux", - "/jdk/openjdk-19+36-linux-aarch64", - "/jdk/openjdk-19+36-windows", - "/jdk/openjdk-19.0.1+10-darwin", - "/jdk/openjdk-19.0.1+10-darwin-aarch64", - "/jdk/openjdk-19.0.1+10-linux", - "/jdk/openjdk-19.0.1+10-linux-aarch64", - "/jdk/openjdk-19.0.1+10-windows", - "/jdk/openjdk-19.0.2+7-darwin", - "/jdk/openjdk-19.0.2+7-darwin-aarch64", - "/jdk/openjdk-19.0.2+7-linux", - "/jdk/openjdk-19.0.2+7-linux-aarch64", - "/jdk/openjdk-19.0.2+7-windows", - "/jdk/openjdk-20+33-darwin", - "/jdk/openjdk-20+33-darwin-aarch64", - "/jdk/openjdk-20+33-linux", - "/jdk/openjdk-20+33-linux-aarch64", - "/jdk/openjdk-20+33-windows", - "/jdk/openjdk-20+34-darwin", - 
"/jdk/openjdk-20+34-darwin-aarch64", - "/jdk/openjdk-20+34-linux", - "/jdk/openjdk-20+34-linux-aarch64", - "/jdk/openjdk-20+34-windows", - "/jdk/openjdk-20+35-darwin", - "/jdk/openjdk-20+35-darwin-aarch64", - "/jdk/openjdk-20+35-linux", - "/jdk/openjdk-20+35-linux-aarch64", - "/jdk/openjdk-20+35-windows", - "/jdk/openjdk-20+36-darwin", - "/jdk/openjdk-20+36-darwin-aarch64", - "/jdk/openjdk-20+36-linux", - "/jdk/openjdk-20+36-linux-aarch64", - "/jdk/openjdk-20+36-windows", - "/jdk/openjdk-20.0.1+9-darwin", - "/jdk/openjdk-20.0.1+9-darwin-aarch64", - "/jdk/openjdk-20.0.1+9-linux", - "/jdk/openjdk-20.0.1+9-linux-aarch64", - "/jdk/openjdk-20.0.1+9-windows", - "/jdk/openjdk-20.0.2+9-darwin", - "/jdk/openjdk-20.0.2+9-darwin-aarch64", - "/jdk/openjdk-20.0.2+9-linux", - "/jdk/openjdk-20.0.2+9-linux-aarch64", - "/jdk/openjdk-20.0.2+9-windows", - "/jdk/openjdk-21+25-darwin", - "/jdk/openjdk-21+25-darwin-aarch64", - "/jdk/openjdk-21+25-linux", - "/jdk/openjdk-21+25-linux-aarch64", - "/jdk/openjdk-21+25-windows", - "/jdk/openjdk-21+26-darwin", - "/jdk/openjdk-21+26-darwin-aarch64", - "/jdk/openjdk-21+26-linux", - "/jdk/openjdk-21+26-linux-aarch64", - "/jdk/openjdk-21+26-windows", - "/jdk/openjdk-21+27-darwin", - "/jdk/openjdk-21+27-darwin-aarch64", - "/jdk/openjdk-21+27-linux", - "/jdk/openjdk-21+27-linux-aarch64", - "/jdk/openjdk-21+27-windows", - "/jdk/openjdk-21+28-darwin", - "/jdk/openjdk-21+28-darwin-aarch64", - "/jdk/openjdk-21+28-linux", - "/jdk/openjdk-21+28-linux-aarch64", - "/jdk/openjdk-21+28-windows", - "/jdk/openjdk-21+29-darwin", - "/jdk/openjdk-21+29-darwin-aarch64", - "/jdk/openjdk-21+29-linux", - "/jdk/openjdk-21+29-linux-aarch64", - "/jdk/openjdk-21+29-windows", - "/jdk/openjdk-21+30-darwin", - "/jdk/openjdk-21+30-darwin-aarch64", - "/jdk/openjdk-21+30-linux", - "/jdk/openjdk-21+30-linux-aarch64", - "/jdk/openjdk-21+30-windows", - "/jdk/openjdk-21+31-darwin", - "/jdk/openjdk-21+31-darwin-aarch64", - "/jdk/openjdk-21+31-linux", - "/jdk/openjdk-21+31-linux-aarch64", - "/jdk/openjdk-21+31-windows", - "/jdk/openjdk-21+32-darwin", - "/jdk/openjdk-21+32-darwin-aarch64", - "/jdk/openjdk-21+32-linux", - "/jdk/openjdk-21+32-linux-aarch64", - "/jdk/openjdk-21+32-windows", - "/jdk/openjdk-21+33-darwin", - "/jdk/openjdk-21+33-darwin-aarch64", - "/jdk/openjdk-21+33-linux", - "/jdk/openjdk-21+33-linux-aarch64", - "/jdk/openjdk-21+33-windows", - "/jdk/openjdk-21+34-darwin", - "/jdk/openjdk-21+34-darwin-aarch64", - "/jdk/openjdk-21+34-linux", - "/jdk/openjdk-21+34-linux-aarch64", - "/jdk/openjdk-21+34-windows", - "/jdk/openjdk-21+35-darwin", - "/jdk/openjdk-21+35-darwin-aarch64", - "/jdk/openjdk-21+35-linux", - "/jdk/openjdk-21+35-linux-aarch64", - "/jdk/openjdk-21+35-windows", - "/jdk/openjdk-21.0.1+12-darwin", - "/jdk/openjdk-21.0.1+12-darwin-aarch64", - "/jdk/openjdk-21.0.1+12-linux", - "/jdk/openjdk-21.0.1+12-linux-aarch64", - "/jdk/openjdk-21.0.1+12-windows", - "/jdk/openjdk-21.0.2+13-darwin", - "/jdk/openjdk-21.0.2+13-darwin-aarch64", - "/jdk/openjdk-21.0.2+13-linux", - "/jdk/openjdk-21.0.2+13-linux-aarch64", - "/jdk/openjdk-21.0.2+13-windows", - "/jdk/openjdk-22+28-darwin", - "/jdk/openjdk-22+28-darwin-aarch64", - "/jdk/openjdk-22+28-linux", - "/jdk/openjdk-22+28-linux-aarch64", - "/jdk/openjdk-22+28-windows", - "/jdk/openjdk-22+29-darwin", - "/jdk/openjdk-22+29-darwin-aarch64", - "/jdk/openjdk-22+29-linux", - "/jdk/openjdk-22+29-linux-aarch64", - "/jdk/openjdk-22+29-windows", - "/jdk/openjdk-22+30-darwin", - "/jdk/openjdk-22+30-darwin-aarch64", - "/jdk/openjdk-22+30-linux", - 
"/jdk/openjdk-22+30-linux-aarch64", - "/jdk/openjdk-22+30-windows", - "/jdk/openjdk-22+31-darwin", - "/jdk/openjdk-22+31-darwin-aarch64", - "/jdk/openjdk-22+31-linux", - "/jdk/openjdk-22+31-linux-aarch64", - "/jdk/openjdk-22+31-windows", - "/jdk/openjdk-22+32-darwin", - "/jdk/openjdk-22+32-darwin-aarch64", - "/jdk/openjdk-22+32-linux", - "/jdk/openjdk-22+32-linux-aarch64", - "/jdk/openjdk-22+32-windows", - "/jdk/openjdk-22+33-darwin", - "/jdk/openjdk-22+33-darwin-aarch64", - "/jdk/openjdk-22+33-linux", - "/jdk/openjdk-22+33-linux-aarch64", - "/jdk/openjdk-22+33-windows", - "/jdk/openjdk-22+34-darwin", - "/jdk/openjdk-22+34-darwin-aarch64", - "/jdk/openjdk-22+34-linux", - "/jdk/openjdk-22+34-linux-aarch64", - "/jdk/openjdk-22+34-windows", - "/jdk/openjdk-22+35-darwin", - "/jdk/openjdk-22+35-darwin-aarch64", - "/jdk/openjdk-22+35-linux", - "/jdk/openjdk-22+35-linux-aarch64", - "/jdk/openjdk-22+35-windows", - "/jdk/openjdk-22+36-darwin", - "/jdk/openjdk-22+36-darwin-aarch64", - "/jdk/openjdk-22+36-linux", - "/jdk/openjdk-22+36-linux-aarch64", - "/jdk/openjdk-22+36-windows", - "/jdk/openjdk-22.0.1+8-darwin", - "/jdk/openjdk-22.0.1+8-darwin-aarch64", - "/jdk/openjdk-22.0.1+8-linux", - "/jdk/openjdk-22.0.1+8-linux-aarch64", - "/jdk/openjdk-22.0.1+8-windows", - "/jdk/openjdk-23+22-darwin", - "/jdk/openjdk-23+22-darwin-aarch64", - "/jdk/openjdk-23+22-linux", - "/jdk/openjdk-23+22-linux-aarch64", - "/jdk/openjdk-23+22-windows", - "/jdk/openjdk-23+23-darwin", - "/jdk/openjdk-23+23-darwin-aarch64", - "/jdk/openjdk-23+23-linux", - "/jdk/openjdk-23+23-linux-aarch64", - "/jdk/openjdk-23+23-windows", - "/jdk/openjdk-23+24-darwin", - "/jdk/openjdk-23+24-darwin-aarch64", - "/jdk/openjdk-23+24-linux", - "/jdk/openjdk-23+24-linux-aarch64", - "/jdk/openjdk-23+24-windows", - "/jdk/openjdk-23+25-darwin", - "/jdk/openjdk-23+25-darwin-aarch64", - "/jdk/openjdk-23+25-linux", - "/jdk/openjdk-23+25-linux-aarch64", - "/jdk/openjdk-23+25-windows", - "/jdk/openjdk-23+26-darwin", - "/jdk/openjdk-23+26-darwin-aarch64", - "/jdk/openjdk-23+26-linux", - "/jdk/openjdk-23+26-linux-aarch64", - "/jdk/openjdk-23+26-windows", - "/jdk/openjdk-23+27-darwin", - "/jdk/openjdk-23+27-darwin-aarch64", - "/jdk/openjdk-23+27-linux", - "/jdk/openjdk-23+27-linux-aarch64", - "/jdk/openjdk-23+27-windows", - "/jdk/openjdk-23+28-darwin", - "/jdk/openjdk-23+28-darwin-aarch64", - "/jdk/openjdk-23+28-linux", - "/jdk/openjdk-23+28-linux-aarch64", - "/jdk/openjdk-23+28-windows", - "/jdk/openjdk-23+29-darwin", - "/jdk/openjdk-23+29-darwin-aarch64", - "/jdk/openjdk-23+29-linux", - "/jdk/openjdk-23+29-linux-aarch64", - "/jdk/openjdk-23+29-windows", - "/jdk/openjdk-23+30-darwin", - "/jdk/openjdk-23+30-darwin-aarch64", - "/jdk/openjdk-23+30-linux", - "/jdk/openjdk-23+30-linux-aarch64", - "/jdk/openjdk-23+30-windows", - "/jdk/openjdk-9.0.4-darwin", - "/jdk/openjdk-9.0.4-linux", - "/jdk/openjdk-9.0.4-windows", - "/jdk/oracle-10+43-darwin", - "/jdk/oracle-10+43-linux", - "/jdk/oracle-10+43-windows", - "/jdk/oracle-10+46-darwin", - "/jdk/oracle-10+46-linux", - "/jdk/oracle-10+46-windows", - "/jdk/oracle-11+11-darwin", - "/jdk/oracle-11+11-linux", - "/jdk/oracle-11+11-windows", - "/jdk/oracle-11+12-darwin", - "/jdk/oracle-11+12-linux", - "/jdk/oracle-11+12-windows", - "/jdk/oracle-11+13-darwin", - "/jdk/oracle-11+13-linux", - "/jdk/oracle-11+13-windows", - "/jdk/oracle-11+14-darwin", - "/jdk/oracle-11+14-linux", - "/jdk/oracle-11+14-windows", - "/jdk/oracle-11+15-darwin", - "/jdk/oracle-11+15-linux", - "/jdk/oracle-11+15-windows", - 
"/jdk/oracle-11+16-darwin", - "/jdk/oracle-11+16-linux", - "/jdk/oracle-11+16-windows", - "/jdk/oracle-11+17-darwin", - "/jdk/oracle-11+17-linux", - "/jdk/oracle-11+17-windows", - "/jdk/oracle-11+18-darwin", - "/jdk/oracle-11+18-linux", - "/jdk/oracle-11+18-windows", - "/jdk/oracle-11+19-darwin", - "/jdk/oracle-11+19-linux", - "/jdk/oracle-11+19-windows", - "/jdk/oracle-11+20-darwin", - "/jdk/oracle-11+20-linux", - "/jdk/oracle-11+20-windows", - "/jdk/oracle-11+21-darwin", - "/jdk/oracle-11+21-linux", - "/jdk/oracle-11+21-windows", - "/jdk/oracle-11+22-darwin", - "/jdk/oracle-11+22-linux", - "/jdk/oracle-11+22-windows", - "/jdk/oracle-11+23-darwin", - "/jdk/oracle-11+23-linux", - "/jdk/oracle-11+23-windows", - "/jdk/oracle-11+24-darwin", - "/jdk/oracle-11+24-linux", - "/jdk/oracle-11+24-windows", - "/jdk/oracle-11+25-darwin", - "/jdk/oracle-11+25-linux", - "/jdk/oracle-11+25-windows", - "/jdk/oracle-11+26-darwin", - "/jdk/oracle-11+26-linux", - "/jdk/oracle-11+26-windows", - "/jdk/oracle-11+27-darwin", - "/jdk/oracle-11+27-linux", - "/jdk/oracle-11+27-windows", - "/jdk/oracle-11+28-darwin", - "/jdk/oracle-11+28-linux", - "/jdk/oracle-11+28-windows", - "/jdk/oracle-11+5-darwin", - "/jdk/oracle-11+5-linux", - "/jdk/oracle-11+5-windows", - "/jdk/oracle-11.0.11-darwin", - "/jdk/oracle-11.0.11-linux", - "/jdk/oracle-11.0.11-linux-aarch64", - "/jdk/oracle-11.0.11-windows", - "/jdk/oracle-11.0.12+8-darwin", - "/jdk/oracle-11.0.12+8-linux", - "/jdk/oracle-11.0.12+8-linux-aarch64", - "/jdk/oracle-11.0.12+8-windows", - "/jdk/oracle-11.0.2+7-darwin", - "/jdk/oracle-11.0.2+7-linux", - "/jdk/oracle-11.0.2+7-windows", - "/jdk/oracle-11.0.2+9-darwin", - "/jdk/oracle-11.0.2+9-linux", - "/jdk/oracle-11.0.2+9-windows", - "/jdk/oracle-11.0.3+12-darwin", - "/jdk/oracle-11.0.3+12-linux", - "/jdk/oracle-11.0.3+12-windows", - "/jdk/oracle-11.0.4+10-darwin", - "/jdk/oracle-11.0.4+10-linux", - "/jdk/oracle-11.0.4+10-windows", - "/jdk/oracle-11.0.5+10-darwin", - "/jdk/oracle-11.0.5+10-linux", - "/jdk/oracle-11.0.5+10-windows", - "/jdk/oracle-11.0.6+8-darwin", - "/jdk/oracle-11.0.6+8-linux", - "/jdk/oracle-11.0.6+8-windows", - "/jdk/oracle-12+33-darwin", - "/jdk/oracle-12+33-linux", - "/jdk/oracle-12+33-windows", - "/jdk/oracle-12.0.1+12-darwin", - "/jdk/oracle-12.0.1+12-linux", - "/jdk/oracle-12.0.1+12-windows", - "/jdk/oracle-12.0.2+10-darwin", - "/jdk/oracle-12.0.2+10-linux", - "/jdk/oracle-12.0.2+10-windows", - "/jdk/oracle-13+33-darwin", - "/jdk/oracle-13+33-linux", - "/jdk/oracle-13+33-windows", - "/jdk/oracle-13.0.1+9-darwin", - "/jdk/oracle-13.0.1+9-linux", - "/jdk/oracle-13.0.1+9-windows", - "/jdk/oracle-13.0.2+8-darwin", - "/jdk/oracle-13.0.2+8-linux", - "/jdk/oracle-13.0.2+8-windows", - "/jdk/oracle-16.0.1+9-darwin", - "/jdk/oracle-16.0.1+9-linux", - "/jdk/oracle-16.0.1+9-linux-aarch64", - "/jdk/oracle-16.0.1+9-windows", - "/jdk/oracle-16.0.2+7-darwin", - "/jdk/oracle-16.0.2+7-linux", - "/jdk/oracle-16.0.2+7-linux-aarch64", - "/jdk/oracle-16.0.2+7-windows", - "/jdk/oracle-7u80-darwin", - "/jdk/oracle-7u80-linux", - "/jdk/oracle-7u80-windows", - "/jdk/oracle-8u161-darwin", - "/jdk/oracle-8u161-linux", - "/jdk/oracle-8u161-windows", - "/jdk/oracle-8u162-darwin", - "/jdk/oracle-8u162-linux", - "/jdk/oracle-8u162-windows", - "/jdk/oracle-8u171-darwin", - "/jdk/oracle-8u171-linux", - "/jdk/oracle-8u171-windows", - "/jdk/oracle-8u172-darwin", - "/jdk/oracle-8u172-linux", - "/jdk/oracle-8u172-windows", - "/jdk/oracle-8u181-darwin", - "/jdk/oracle-8u181-linux", - "/jdk/oracle-8u181-windows", - 
"/jdk/oracle-8u191-darwin", - "/jdk/oracle-8u191-linux", - "/jdk/oracle-8u191-windows", - "/jdk/oracle-8u192-darwin", - "/jdk/oracle-8u192-linux", - "/jdk/oracle-8u192-windows", - "/jdk/oracle-8u201-darwin", - "/jdk/oracle-8u201-linux", - "/jdk/oracle-8u201-windows", - "/jdk/oracle-8u202-darwin", - "/jdk/oracle-8u202-linux", - "/jdk/oracle-8u202-windows", - "/jdk/oracle-8u211-darwin", - "/jdk/oracle-8u211-linux", - "/jdk/oracle-8u211-windows", - "/jdk/oracle-8u212-darwin", - "/jdk/oracle-8u212-linux", - "/jdk/oracle-8u212-windows", - "/jdk/oracle-8u221-darwin", - "/jdk/oracle-8u221-linux", - "/jdk/oracle-8u221-windows", - "/jdk/oracle-8u231-darwin", - "/jdk/oracle-8u231-linux", - "/jdk/oracle-8u231-windows", - "/jdk/oracle-8u241-darwin", - "/jdk/oracle-8u241-linux", - "/jdk/oracle-8u241-windows", - "/jdk/oracle-8u271-darwin", - "/jdk/oracle-8u271-linux", - "/jdk/oracle-8u271-linux-aarch64", - "/jdk/oracle-8u271-linux-x86_32", - "/jdk/oracle-8u271-windows", - "/jdk/oracle-8u271-windows-x86_32", - "/jdk/oracle-8u281-darwin", - "/jdk/oracle-8u281-linux", - "/jdk/oracle-8u281-linux-aarch64", - "/jdk/oracle-8u281-linux-x86_32", - "/jdk/oracle-8u281-windows", - "/jdk/oracle-8u281-windows-x86_32", - "/jdk/oracle-8u291-darwin", - "/jdk/oracle-8u291-linux", - "/jdk/oracle-8u291-linux-aarch64", - "/jdk/oracle-8u291-linux-x86_32", - "/jdk/oracle-8u291-windows", - "/jdk/oracle-8u291-windows-x86_32", - "/jdk/oracle-8u301-darwin", - "/jdk/oracle-8u301-linux", - "/jdk/oracle-8u301-linux-aarch64", - "/jdk/oracle-8u301-linux-x86_32", - "/jdk/oracle-8u301-windows", - "/jdk/oracle-8u301-windows-x86_32", - "/jdk/oracle-9.0.4+11-darwin", - "/jdk/oracle-9.0.4+11-linux", - "/jdk/oracle-9.0.4+11-windows", - "/jdk/sapjvm-7.1.073-linux", - "/jdk/sapjvm-8.1.065-linux", - "/jdk/sapjvm-8.1.067-linux", - "/jdk/sapjvm-8.1.071-linux", - "/jdk/zulu-1.8.0.131-linux-aarch64", - "/jdk/zulu-1.8.0.144-linux-aarch64", - "/jdk/zulu-1.8.0.152-linux-aarch64", - "/jdk/zulu-1.8.0.162-linux-aarch64", - "/jdk/zulu-1.8.0.172-linux-aarch64", - "/jdk/zulu-1.8.0.181-linux-aarch64", - "/jdk/zulu-1.8.0.192-linux-aarch64", - "/jdk/zulu-1.8.0.202-linux-aarch64", - "/jdk/zulu-1.8.0.212-linux-aarch64", - "/jdk/zulu-10.0.0-darwin", - "/jdk/zulu-10.0.0-linux", - "/jdk/zulu-10.0.0-windows", - "/jdk/zulu-10.0.1-darwin", - "/jdk/zulu-10.0.1-linux", - "/jdk/zulu-10.0.1-windows", - "/jdk/zulu-10.0.2-darwin", - "/jdk/zulu-10.0.2-linux", - "/jdk/zulu-10.0.2-windows", - "/jdk/zulu-11.0.0-linux-aarch64", - "/jdk/zulu-11.0.1-darwin", - "/jdk/zulu-11.0.1-linux", - "/jdk/zulu-11.0.1-windows", - "/jdk/zulu-11.0.10-darwin", - "/jdk/zulu-11.0.10-darwin-aarch64", - "/jdk/zulu-11.0.10-linux", - "/jdk/zulu-11.0.10-windows", - "/jdk/zulu-11.0.11-darwin", - "/jdk/zulu-11.0.11-darwin-aarch64", - "/jdk/zulu-11.0.11-linux", - "/jdk/zulu-11.0.11-windows", - "/jdk/zulu-11.0.12-darwin", - "/jdk/zulu-11.0.12-darwin-aarch64", - "/jdk/zulu-11.0.12-linux", - "/jdk/zulu-11.0.12-windows", - "/jdk/zulu-11.0.13-darwin", - "/jdk/zulu-11.0.13-darwin-aarch64", - "/jdk/zulu-11.0.13-linux", - "/jdk/zulu-11.0.13-windows", - "/jdk/zulu-11.0.14-darwin", - "/jdk/zulu-11.0.14-darwin-aarch64", - "/jdk/zulu-11.0.14-linux", - "/jdk/zulu-11.0.14-windows", - "/jdk/zulu-11.0.14.1-darwin", - "/jdk/zulu-11.0.14.1-darwin-aarch64", - "/jdk/zulu-11.0.14.1-linux", - "/jdk/zulu-11.0.14.1-windows", - "/jdk/zulu-11.0.15-darwin", - "/jdk/zulu-11.0.15-darwin-aarch64", - "/jdk/zulu-11.0.15-linux", - "/jdk/zulu-11.0.15-windows", - "/jdk/zulu-11.0.16-darwin", - "/jdk/zulu-11.0.16-darwin-aarch64", - 
"/jdk/zulu-11.0.16-linux", - "/jdk/zulu-11.0.16-windows", - "/jdk/zulu-11.0.16.1-darwin", - "/jdk/zulu-11.0.16.1-darwin-aarch64", - "/jdk/zulu-11.0.16.1-linux", - "/jdk/zulu-11.0.16.1-windows", - "/jdk/zulu-11.0.17-darwin", - "/jdk/zulu-11.0.17-darwin-aarch64", - "/jdk/zulu-11.0.17-linux", - "/jdk/zulu-11.0.17-windows", - "/jdk/zulu-11.0.18-darwin", - "/jdk/zulu-11.0.18-darwin-aarch64", - "/jdk/zulu-11.0.18-linux", - "/jdk/zulu-11.0.18-windows", - "/jdk/zulu-11.0.19-darwin", - "/jdk/zulu-11.0.19-darwin-aarch64", - "/jdk/zulu-11.0.19-linux", - "/jdk/zulu-11.0.19-windows", - "/jdk/zulu-11.0.2-darwin", - "/jdk/zulu-11.0.2-linux", - "/jdk/zulu-11.0.2-windows", - "/jdk/zulu-11.0.20-darwin", - "/jdk/zulu-11.0.20-darwin-aarch64", - "/jdk/zulu-11.0.20-linux", - "/jdk/zulu-11.0.20-linux-aarch64", - "/jdk/zulu-11.0.20-windows", - "/jdk/zulu-11.0.20.1-darwin", - "/jdk/zulu-11.0.20.1-darwin-aarch64", - "/jdk/zulu-11.0.20.1-linux", - "/jdk/zulu-11.0.20.1-linux-aarch64", - "/jdk/zulu-11.0.20.1-windows", - "/jdk/zulu-11.0.21-darwin", - "/jdk/zulu-11.0.21-darwin-aarch64", - "/jdk/zulu-11.0.21-linux", - "/jdk/zulu-11.0.21-linux-aarch64", - "/jdk/zulu-11.0.21-windows", - "/jdk/zulu-11.0.22-darwin", - "/jdk/zulu-11.0.22-darwin-aarch64", - "/jdk/zulu-11.0.22-linux", - "/jdk/zulu-11.0.22-linux-aarch64", - "/jdk/zulu-11.0.22-windows", - "/jdk/zulu-11.0.23-darwin", - "/jdk/zulu-11.0.23-darwin-aarch64", - "/jdk/zulu-11.0.23-linux", - "/jdk/zulu-11.0.23-linux-aarch64", - "/jdk/zulu-11.0.23-windows", - "/jdk/zulu-11.0.3-darwin", - "/jdk/zulu-11.0.3-linux", - "/jdk/zulu-11.0.3-linux-aarch64", - "/jdk/zulu-11.0.3-windows", - "/jdk/zulu-11.0.4-darwin", - "/jdk/zulu-11.0.4-linux", - "/jdk/zulu-11.0.4-windows", - "/jdk/zulu-11.0.5-darwin", - "/jdk/zulu-11.0.5-linux", - "/jdk/zulu-11.0.5-linux-aarch64", - "/jdk/zulu-11.0.5-windows", - "/jdk/zulu-11.0.6-darwin", - "/jdk/zulu-11.0.6-linux", - "/jdk/zulu-11.0.6-linux-aarch64", - "/jdk/zulu-11.0.6-windows", - "/jdk/zulu-11.0.7-darwin", - "/jdk/zulu-11.0.7-linux", - "/jdk/zulu-11.0.7-linux-aarch64", - "/jdk/zulu-11.0.7-windows", - "/jdk/zulu-11.0.8-darwin", - "/jdk/zulu-11.0.8-linux", - "/jdk/zulu-11.0.8-linux-aarch64", - "/jdk/zulu-11.0.8-windows", - "/jdk/zulu-11.0.9-darwin", - "/jdk/zulu-11.0.9-linux", - "/jdk/zulu-11.0.9-windows", - "/jdk/zulu-11.0.9.1-darwin", - "/jdk/zulu-11.0.9.1-darwin-aarch64", - "/jdk/zulu-11.0.9.1-linux", - "/jdk/zulu-11.0.9.1-windows", - "/jdk/zulu-12-darwin", - "/jdk/zulu-12-linux", - "/jdk/zulu-12-windows", - "/jdk/zulu-12.0.0-darwin", - "/jdk/zulu-12.0.0-linux", - "/jdk/zulu-12.0.0-windows", - "/jdk/zulu-12.0.1-darwin", - "/jdk/zulu-12.0.1-linux", - "/jdk/zulu-12.0.1-windows", - "/jdk/zulu-12.0.2-darwin", - "/jdk/zulu-12.0.2-linux", - "/jdk/zulu-12.0.2-windows", - "/jdk/zulu-13-darwin", - "/jdk/zulu-13-linux", - "/jdk/zulu-13-windows", - "/jdk/zulu-13.0.0-darwin", - "/jdk/zulu-13.0.0-linux", - "/jdk/zulu-13.0.0-windows", - "/jdk/zulu-13.0.1-darwin", - "/jdk/zulu-13.0.1-linux", - "/jdk/zulu-13.0.1-windows", - "/jdk/zulu-13.0.10-darwin", - "/jdk/zulu-13.0.10-darwin-aarch64", - "/jdk/zulu-13.0.10-linux", - "/jdk/zulu-13.0.10-windows", - "/jdk/zulu-13.0.11-darwin", - "/jdk/zulu-13.0.11-darwin-aarch64", - "/jdk/zulu-13.0.11-linux", - "/jdk/zulu-13.0.11-linux-aarch64", - "/jdk/zulu-13.0.11-windows", - "/jdk/zulu-13.0.12-darwin", - "/jdk/zulu-13.0.12-darwin-aarch64", - "/jdk/zulu-13.0.12-linux", - "/jdk/zulu-13.0.12-linux-aarch64", - "/jdk/zulu-13.0.12-windows", - "/jdk/zulu-13.0.13-darwin", - "/jdk/zulu-13.0.13-darwin-aarch64", - 
"/jdk/zulu-13.0.13-linux", - "/jdk/zulu-13.0.13-linux-aarch64", - "/jdk/zulu-13.0.13-windows", - "/jdk/zulu-13.0.14-darwin", - "/jdk/zulu-13.0.14-darwin-aarch64", - "/jdk/zulu-13.0.14-linux", - "/jdk/zulu-13.0.14-linux-aarch64", - "/jdk/zulu-13.0.14-windows", - "/jdk/zulu-13.0.2-darwin", - "/jdk/zulu-13.0.2-linux", - "/jdk/zulu-13.0.2-linux-aarch64", - "/jdk/zulu-13.0.2-windows", - "/jdk/zulu-13.0.3-darwin", - "/jdk/zulu-13.0.3-linux", - "/jdk/zulu-13.0.3-linux-aarch64", - "/jdk/zulu-13.0.3-windows", - "/jdk/zulu-13.0.4-darwin", - "/jdk/zulu-13.0.4-linux", - "/jdk/zulu-13.0.4-linux-aarch64", - "/jdk/zulu-13.0.4-windows", - "/jdk/zulu-13.0.5-darwin", - "/jdk/zulu-13.0.5-linux", - "/jdk/zulu-13.0.5-windows", - "/jdk/zulu-13.0.5.1-darwin", - "/jdk/zulu-13.0.5.1-darwin-aarch64", - "/jdk/zulu-13.0.5.1-linux", - "/jdk/zulu-13.0.5.1-windows", - "/jdk/zulu-13.0.6-darwin", - "/jdk/zulu-13.0.6-darwin-aarch64", - "/jdk/zulu-13.0.6-linux", - "/jdk/zulu-13.0.6-windows", - "/jdk/zulu-13.0.7-darwin", - "/jdk/zulu-13.0.7-darwin-aarch64", - "/jdk/zulu-13.0.7-linux", - "/jdk/zulu-13.0.7-windows", - "/jdk/zulu-13.0.8-darwin", - "/jdk/zulu-13.0.8-darwin-aarch64", - "/jdk/zulu-13.0.8-linux", - "/jdk/zulu-13.0.8-windows", - "/jdk/zulu-13.0.9-darwin", - "/jdk/zulu-13.0.9-darwin-aarch64", - "/jdk/zulu-13.0.9-linux", - "/jdk/zulu-13.0.9-windows", - "/jdk/zulu-14-darwin", - "/jdk/zulu-14-linux", - "/jdk/zulu-14-windows", - "/jdk/zulu-14.0.0-darwin", - "/jdk/zulu-14.0.0-linux", - "/jdk/zulu-14.0.0-windows", - "/jdk/zulu-14.0.1-darwin", - "/jdk/zulu-14.0.1-linux", - "/jdk/zulu-14.0.1-windows", - "/jdk/zulu-14.0.2-darwin", - "/jdk/zulu-14.0.2-linux", - "/jdk/zulu-14.0.2-windows", - "/jdk/zulu-15.0.0-darwin", - "/jdk/zulu-15.0.0-linux", - "/jdk/zulu-15.0.0-windows", - "/jdk/zulu-15.0.1-darwin", - "/jdk/zulu-15.0.1-darwin-aarch64", - "/jdk/zulu-15.0.1-linux", - "/jdk/zulu-15.0.1-windows", - "/jdk/zulu-15.0.10-darwin", - "/jdk/zulu-15.0.10-darwin-aarch64", - "/jdk/zulu-15.0.10-linux", - "/jdk/zulu-15.0.10-linux-aarch64", - "/jdk/zulu-15.0.10-windows", - "/jdk/zulu-15.0.2-darwin", - "/jdk/zulu-15.0.2-darwin-aarch64", - "/jdk/zulu-15.0.2-linux", - "/jdk/zulu-15.0.2-windows", - "/jdk/zulu-15.0.3-darwin", - "/jdk/zulu-15.0.3-darwin-aarch64", - "/jdk/zulu-15.0.3-linux", - "/jdk/zulu-15.0.3-windows", - "/jdk/zulu-15.0.4-darwin", - "/jdk/zulu-15.0.4-darwin-aarch64", - "/jdk/zulu-15.0.4-linux", - "/jdk/zulu-15.0.4-linux-aarch64", - "/jdk/zulu-15.0.4-windows", - "/jdk/zulu-15.0.5-darwin", - "/jdk/zulu-15.0.5-darwin-aarch64", - "/jdk/zulu-15.0.5-linux", - "/jdk/zulu-15.0.5-linux-aarch64", - "/jdk/zulu-15.0.5-windows", - "/jdk/zulu-15.0.6-darwin", - "/jdk/zulu-15.0.6-darwin-aarch64", - "/jdk/zulu-15.0.6-linux", - "/jdk/zulu-15.0.6-linux-aarch64", - "/jdk/zulu-15.0.6-windows", - "/jdk/zulu-15.0.7-darwin", - "/jdk/zulu-15.0.7-darwin-aarch64", - "/jdk/zulu-15.0.7-linux", - "/jdk/zulu-15.0.7-linux-aarch64", - "/jdk/zulu-15.0.7-windows", - "/jdk/zulu-15.0.8-darwin", - "/jdk/zulu-15.0.8-darwin-aarch64", - "/jdk/zulu-15.0.8-linux", - "/jdk/zulu-15.0.8-linux-aarch64", - "/jdk/zulu-15.0.8-windows", - "/jdk/zulu-15.0.9-darwin", - "/jdk/zulu-15.0.9-darwin-aarch64", - "/jdk/zulu-15.0.9-linux", - "/jdk/zulu-15.0.9-linux-aarch64", - "/jdk/zulu-15.0.9-windows", - "/jdk/zulu-16.0.0-darwin", - "/jdk/zulu-16.0.0-darwin-aarch64", - "/jdk/zulu-16.0.0-linux", - "/jdk/zulu-16.0.0-linux-aarch64", - "/jdk/zulu-16.0.0-windows", - "/jdk/zulu-16.0.1-darwin", - "/jdk/zulu-16.0.1-darwin-aarch64", - "/jdk/zulu-16.0.1-linux", - "/jdk/zulu-16.0.1-linux-aarch64", 
- "/jdk/zulu-16.0.1-windows", - "/jdk/zulu-16.0.1-windows-aarch64", - "/jdk/zulu-16.0.2-darwin", - "/jdk/zulu-16.0.2-darwin-aarch64", - "/jdk/zulu-16.0.2-linux", - "/jdk/zulu-16.0.2-linux-aarch64", - "/jdk/zulu-16.0.2-windows", - "/jdk/zulu-16.0.2-windows-aarch64", - "/jdk/zulu-17.0.0-darwin", - "/jdk/zulu-17.0.0-darwin-aarch64", - "/jdk/zulu-17.0.0-linux", - "/jdk/zulu-17.0.0-linux-aarch64", - "/jdk/zulu-17.0.0-windows", - "/jdk/zulu-17.0.0-windows-aarch64", - "/jdk/zulu-17.0.1-darwin", - "/jdk/zulu-17.0.1-darwin-aarch64", - "/jdk/zulu-17.0.1-linux", - "/jdk/zulu-17.0.1-linux-aarch64", - "/jdk/zulu-17.0.1-windows", - "/jdk/zulu-17.0.1-windows-aarch64", - "/jdk/zulu-17.0.10-darwin", - "/jdk/zulu-17.0.10-darwin-aarch64", - "/jdk/zulu-17.0.10-linux", - "/jdk/zulu-17.0.10-linux-aarch64", - "/jdk/zulu-17.0.10-windows", - "/jdk/zulu-17.0.10-windows-aarch64", - "/jdk/zulu-17.0.11-darwin", - "/jdk/zulu-17.0.11-darwin-aarch64", - "/jdk/zulu-17.0.11-linux", - "/jdk/zulu-17.0.11-linux-aarch64", - "/jdk/zulu-17.0.11-windows", - "/jdk/zulu-17.0.11-windows-aarch64", - "/jdk/zulu-17.0.2-darwin", - "/jdk/zulu-17.0.2-darwin-aarch64", - "/jdk/zulu-17.0.2-linux", - "/jdk/zulu-17.0.2-linux-aarch64", - "/jdk/zulu-17.0.2-windows", - "/jdk/zulu-17.0.2-windows-aarch64", - "/jdk/zulu-17.0.3-darwin", - "/jdk/zulu-17.0.3-darwin-aarch64", - "/jdk/zulu-17.0.3-linux", - "/jdk/zulu-17.0.3-linux-aarch64", - "/jdk/zulu-17.0.3-windows", - "/jdk/zulu-17.0.3-windows-aarch64", - "/jdk/zulu-17.0.4-darwin", - "/jdk/zulu-17.0.4-darwin-aarch64", - "/jdk/zulu-17.0.4-linux", - "/jdk/zulu-17.0.4-linux-aarch64", - "/jdk/zulu-17.0.4-windows", - "/jdk/zulu-17.0.4-windows-aarch64", - "/jdk/zulu-17.0.4.1-darwin", - "/jdk/zulu-17.0.4.1-darwin-aarch64", - "/jdk/zulu-17.0.4.1-linux", - "/jdk/zulu-17.0.4.1-linux-aarch64", - "/jdk/zulu-17.0.4.1-windows", - "/jdk/zulu-17.0.4.1-windows-aarch64", - "/jdk/zulu-17.0.5-darwin", - "/jdk/zulu-17.0.5-darwin-aarch64", - "/jdk/zulu-17.0.5-linux", - "/jdk/zulu-17.0.5-linux-aarch64", - "/jdk/zulu-17.0.5-windows", - "/jdk/zulu-17.0.5-windows-aarch64", - "/jdk/zulu-17.0.6-darwin", - "/jdk/zulu-17.0.6-darwin-aarch64", - "/jdk/zulu-17.0.6-linux", - "/jdk/zulu-17.0.6-linux-aarch64", - "/jdk/zulu-17.0.6-windows", - "/jdk/zulu-17.0.6-windows-aarch64", - "/jdk/zulu-17.0.7-darwin", - "/jdk/zulu-17.0.7-darwin-aarch64", - "/jdk/zulu-17.0.7-linux", - "/jdk/zulu-17.0.7-linux-aarch64", - "/jdk/zulu-17.0.7-windows", - "/jdk/zulu-17.0.7-windows-aarch64", - "/jdk/zulu-17.0.8-darwin", - "/jdk/zulu-17.0.8-darwin-aarch64", - "/jdk/zulu-17.0.8-linux", - "/jdk/zulu-17.0.8-linux-aarch64", - "/jdk/zulu-17.0.8-windows", - "/jdk/zulu-17.0.8-windows-aarch64", - "/jdk/zulu-17.0.8.1-darwin", - "/jdk/zulu-17.0.8.1-darwin-aarch64", - "/jdk/zulu-17.0.8.1-linux", - "/jdk/zulu-17.0.8.1-linux-aarch64", - "/jdk/zulu-17.0.8.1-windows", - "/jdk/zulu-17.0.8.1-windows-aarch64", - "/jdk/zulu-17.0.9-darwin", - "/jdk/zulu-17.0.9-darwin-aarch64", - "/jdk/zulu-17.0.9-linux", - "/jdk/zulu-17.0.9-linux-aarch64", - "/jdk/zulu-17.0.9-windows", - "/jdk/zulu-17.0.9-windows-aarch64", - "/jdk/zulu-18.0.0-darwin", - "/jdk/zulu-18.0.0-darwin-aarch64", - "/jdk/zulu-18.0.0-linux", - "/jdk/zulu-18.0.0-linux-aarch64", - "/jdk/zulu-18.0.0-windows", - "/jdk/zulu-18.0.0-windows-aarch64", - "/jdk/zulu-18.0.1-darwin", - "/jdk/zulu-18.0.1-darwin-aarch64", - "/jdk/zulu-18.0.1-linux", - "/jdk/zulu-18.0.1-linux-aarch64", - "/jdk/zulu-18.0.1-windows", - "/jdk/zulu-18.0.1-windows-aarch64", - "/jdk/zulu-18.0.2-darwin", - "/jdk/zulu-18.0.2-darwin-aarch64", - 
"/jdk/zulu-18.0.2-linux", - "/jdk/zulu-18.0.2-linux-aarch64", - "/jdk/zulu-18.0.2-windows", - "/jdk/zulu-18.0.2-windows-aarch64", - "/jdk/zulu-18.0.2.1-darwin", - "/jdk/zulu-18.0.2.1-darwin-aarch64", - "/jdk/zulu-18.0.2.1-linux", - "/jdk/zulu-18.0.2.1-linux-aarch64", - "/jdk/zulu-18.0.2.1-windows", - "/jdk/zulu-18.0.2.1-windows-aarch64", - "/jdk/zulu-19.0.0-darwin", - "/jdk/zulu-19.0.0-darwin-aarch64", - "/jdk/zulu-19.0.0-linux", - "/jdk/zulu-19.0.0-linux-aarch64", - "/jdk/zulu-19.0.0-windows", - "/jdk/zulu-19.0.0-windows-aarch64", - "/jdk/zulu-19.0.1-darwin", - "/jdk/zulu-19.0.1-darwin-aarch64", - "/jdk/zulu-19.0.1-linux", - "/jdk/zulu-19.0.1-linux-aarch64", - "/jdk/zulu-19.0.1-windows", - "/jdk/zulu-19.0.2-darwin", - "/jdk/zulu-19.0.2-darwin-aarch64", - "/jdk/zulu-19.0.2-linux", - "/jdk/zulu-19.0.2-linux-aarch64", - "/jdk/zulu-19.0.2-windows", - "/jdk/zulu-20.0.0-darwin", - "/jdk/zulu-20.0.0-darwin-aarch64", - "/jdk/zulu-20.0.0-linux", - "/jdk/zulu-20.0.0-linux-aarch64", - "/jdk/zulu-20.0.0-windows", - "/jdk/zulu-20.0.1-darwin", - "/jdk/zulu-20.0.1-darwin-aarch64", - "/jdk/zulu-20.0.1-linux", - "/jdk/zulu-20.0.1-linux-aarch64", - "/jdk/zulu-20.0.1-windows", - "/jdk/zulu-20.0.2-darwin", - "/jdk/zulu-20.0.2-darwin-aarch64", - "/jdk/zulu-20.0.2-linux", - "/jdk/zulu-20.0.2-linux-aarch64", - "/jdk/zulu-20.0.2-windows", - "/jdk/zulu-21.0.0-darwin", - "/jdk/zulu-21.0.0-darwin-aarch64", - "/jdk/zulu-21.0.0-linux", - "/jdk/zulu-21.0.0-linux-aarch64", - "/jdk/zulu-21.0.0-windows", - "/jdk/zulu-21.0.1-darwin", - "/jdk/zulu-21.0.1-darwin-aarch64", - "/jdk/zulu-21.0.1-linux", - "/jdk/zulu-21.0.1-linux-aarch64", - "/jdk/zulu-21.0.1-windows", - "/jdk/zulu-21.0.2-darwin", - "/jdk/zulu-21.0.2-darwin-aarch64", - "/jdk/zulu-21.0.2-linux", - "/jdk/zulu-21.0.2-linux-aarch64", - "/jdk/zulu-21.0.2-windows", - "/jdk/zulu-21.0.3-darwin", - "/jdk/zulu-21.0.3-darwin-aarch64", - "/jdk/zulu-21.0.3-linux", - "/jdk/zulu-21.0.3-linux-aarch64", - "/jdk/zulu-21.0.3-windows", - "/jdk/zulu-21.0.3-windows-aarch64", - "/jdk/zulu-22.0.0-darwin", - "/jdk/zulu-22.0.0-darwin-aarch64", - "/jdk/zulu-22.0.0-linux", - "/jdk/zulu-22.0.0-linux-aarch64", - "/jdk/zulu-22.0.0-windows", - "/jdk/zulu-22.0.1-darwin", - "/jdk/zulu-22.0.1-darwin-aarch64", - "/jdk/zulu-22.0.1-linux", - "/jdk/zulu-22.0.1-linux-aarch64", - "/jdk/zulu-22.0.1-windows", - "/jdk/zulu-6.0.103-linux", - "/jdk/zulu-6.0.103-windows", - "/jdk/zulu-6.0.107-linux", - "/jdk/zulu-6.0.107-windows", - "/jdk/zulu-6.0.113-linux", - "/jdk/zulu-6.0.113-windows", - "/jdk/zulu-6.0.119-linux", - "/jdk/zulu-6.0.119-windows", - "/jdk/zulu-6.0.47-windows", - "/jdk/zulu-6.0.49-windows", - "/jdk/zulu-6.0.53-windows", - "/jdk/zulu-6.0.56-windows", - "/jdk/zulu-6.0.59-windows", - "/jdk/zulu-6.0.63-windows", - "/jdk/zulu-6.0.69-windows", - "/jdk/zulu-6.0.73-windows", - "/jdk/zulu-6.0.77-linux", - "/jdk/zulu-6.0.77-windows", - "/jdk/zulu-6.0.79-linux", - "/jdk/zulu-6.0.79-windows", - "/jdk/zulu-6.0.83-linux", - "/jdk/zulu-6.0.83-windows", - "/jdk/zulu-6.0.87-linux", - "/jdk/zulu-6.0.87-windows", - "/jdk/zulu-6.0.89-linux", - "/jdk/zulu-6.0.89-windows", - "/jdk/zulu-6.0.93-linux", - "/jdk/zulu-6.0.93-windows", - "/jdk/zulu-6.0.97-linux", - "/jdk/zulu-6.0.97-windows", - "/jdk/zulu-6.0.99-linux", - "/jdk/zulu-6.0.99-windows", - "/jdk/zulu-7.0.101-darwin", - "/jdk/zulu-7.0.101-linux", - "/jdk/zulu-7.0.101-windows", - "/jdk/zulu-7.0.111-darwin", - "/jdk/zulu-7.0.111-linux", - "/jdk/zulu-7.0.111-windows", - "/jdk/zulu-7.0.121-darwin", - "/jdk/zulu-7.0.121-linux", - "/jdk/zulu-7.0.121-windows", - 
"/jdk/zulu-7.0.131-darwin", - "/jdk/zulu-7.0.131-linux", - "/jdk/zulu-7.0.131-windows", - "/jdk/zulu-7.0.141-darwin", - "/jdk/zulu-7.0.141-linux", - "/jdk/zulu-7.0.141-windows", - "/jdk/zulu-7.0.154-darwin", - "/jdk/zulu-7.0.154-linux", - "/jdk/zulu-7.0.154-windows", - "/jdk/zulu-7.0.161-darwin", - "/jdk/zulu-7.0.161-linux", - "/jdk/zulu-7.0.161-windows", - "/jdk/zulu-7.0.171-darwin", - "/jdk/zulu-7.0.171-linux", - "/jdk/zulu-7.0.171-windows", - "/jdk/zulu-7.0.181-darwin", - "/jdk/zulu-7.0.181-linux", - "/jdk/zulu-7.0.181-windows", - "/jdk/zulu-7.0.191-darwin", - "/jdk/zulu-7.0.191-linux", - "/jdk/zulu-7.0.191-windows", - "/jdk/zulu-7.0.201-darwin", - "/jdk/zulu-7.0.201-linux", - "/jdk/zulu-7.0.201-windows", - "/jdk/zulu-7.0.211-darwin", - "/jdk/zulu-7.0.211-linux", - "/jdk/zulu-7.0.211-windows", - "/jdk/zulu-7.0.222-darwin", - "/jdk/zulu-7.0.222-linux", - "/jdk/zulu-7.0.222-windows", - "/jdk/zulu-7.0.232-darwin", - "/jdk/zulu-7.0.232-linux", - "/jdk/zulu-7.0.232-windows", - "/jdk/zulu-7.0.242-darwin", - "/jdk/zulu-7.0.242-linux", - "/jdk/zulu-7.0.242-windows", - "/jdk/zulu-7.0.252-darwin", - "/jdk/zulu-7.0.252-linux", - "/jdk/zulu-7.0.252-windows", - "/jdk/zulu-7.0.262-darwin", - "/jdk/zulu-7.0.262-linux", - "/jdk/zulu-7.0.262-windows", - "/jdk/zulu-7.0.272-darwin", - "/jdk/zulu-7.0.272-linux", - "/jdk/zulu-7.0.272-windows", - "/jdk/zulu-7.0.282-darwin", - "/jdk/zulu-7.0.282-linux", - "/jdk/zulu-7.0.282-windows", - "/jdk/zulu-7.0.285-darwin", - "/jdk/zulu-7.0.285-linux", - "/jdk/zulu-7.0.285-windows", - "/jdk/zulu-7.0.292-darwin", - "/jdk/zulu-7.0.292-linux", - "/jdk/zulu-7.0.292-windows", - "/jdk/zulu-7.0.302-darwin", - "/jdk/zulu-7.0.302-linux", - "/jdk/zulu-7.0.302-windows", - "/jdk/zulu-7.0.312-darwin", - "/jdk/zulu-7.0.312-linux", - "/jdk/zulu-7.0.312-windows", - "/jdk/zulu-7.0.322-darwin", - "/jdk/zulu-7.0.322-linux", - "/jdk/zulu-7.0.322-windows", - "/jdk/zulu-7.0.332-darwin", - "/jdk/zulu-7.0.332-linux", - "/jdk/zulu-7.0.332-windows", - "/jdk/zulu-7.0.342-darwin", - "/jdk/zulu-7.0.342-linux", - "/jdk/zulu-7.0.342-windows", - "/jdk/zulu-7.0.352-darwin", - "/jdk/zulu-7.0.352-linux", - "/jdk/zulu-7.0.352-windows", - "/jdk/zulu-7.0.45-windows", - "/jdk/zulu-7.0.51-windows", - "/jdk/zulu-7.0.55-windows", - "/jdk/zulu-7.0.60-windows", - "/jdk/zulu-7.0.65-darwin", - "/jdk/zulu-7.0.65-windows", - "/jdk/zulu-7.0.72-darwin", - "/jdk/zulu-7.0.72-windows", - "/jdk/zulu-7.0.76-darwin", - "/jdk/zulu-7.0.76-windows", - "/jdk/zulu-7.0.79-darwin", - "/jdk/zulu-7.0.79-windows", - "/jdk/zulu-7.0.80-darwin", - "/jdk/zulu-7.0.80-windows", - "/jdk/zulu-7.0.85-darwin", - "/jdk/zulu-7.0.85-windows", - "/jdk/zulu-7.0.95-darwin", - "/jdk/zulu-7.0.95-linux", - "/jdk/zulu-7.0.95-windows", - "/jdk/zulu-8.0.0-windows", - "/jdk/zulu-8.0.101-windows", - "/jdk/zulu-8.0.102-darwin", - "/jdk/zulu-8.0.102-linux", - "/jdk/zulu-8.0.102-windows", - "/jdk/zulu-8.0.11-darwin", - "/jdk/zulu-8.0.11-windows", - "/jdk/zulu-8.0.112-darwin", - "/jdk/zulu-8.0.112-linux", - "/jdk/zulu-8.0.112-windows", - "/jdk/zulu-8.0.121-darwin", - "/jdk/zulu-8.0.121-linux", - "/jdk/zulu-8.0.121-windows", - "/jdk/zulu-8.0.131-darwin", - "/jdk/zulu-8.0.131-linux", - "/jdk/zulu-8.0.131-windows", - "/jdk/zulu-8.0.144-darwin", - "/jdk/zulu-8.0.144-linux", - "/jdk/zulu-8.0.144-windows", - "/jdk/zulu-8.0.152-darwin", - "/jdk/zulu-8.0.152-linux", - "/jdk/zulu-8.0.152-windows", - "/jdk/zulu-8.0.162-darwin", - "/jdk/zulu-8.0.162-linux", - "/jdk/zulu-8.0.162-windows", - "/jdk/zulu-8.0.163-darwin", - "/jdk/zulu-8.0.163-linux", - 
"/jdk/zulu-8.0.163-windows", - "/jdk/zulu-8.0.172-darwin", - "/jdk/zulu-8.0.172-linux", - "/jdk/zulu-8.0.172-windows", - "/jdk/zulu-8.0.181-darwin", - "/jdk/zulu-8.0.181-linux", - "/jdk/zulu-8.0.181-windows", - "/jdk/zulu-8.0.192-darwin", - "/jdk/zulu-8.0.192-linux", - "/jdk/zulu-8.0.192-windows", - "/jdk/zulu-8.0.20-darwin", - "/jdk/zulu-8.0.20-windows", - "/jdk/zulu-8.0.201-darwin", - "/jdk/zulu-8.0.201-linux", - "/jdk/zulu-8.0.201-windows", - "/jdk/zulu-8.0.202-darwin", - "/jdk/zulu-8.0.202-linux", - "/jdk/zulu-8.0.202-windows", - "/jdk/zulu-8.0.212-darwin", - "/jdk/zulu-8.0.212-linux", - "/jdk/zulu-8.0.212-windows", - "/jdk/zulu-8.0.222-darwin", - "/jdk/zulu-8.0.222-linux", - "/jdk/zulu-8.0.222-windows", - "/jdk/zulu-8.0.232-darwin", - "/jdk/zulu-8.0.232-linux", - "/jdk/zulu-8.0.232-windows", - "/jdk/zulu-8.0.242-darwin", - "/jdk/zulu-8.0.242-linux", - "/jdk/zulu-8.0.242-windows", - "/jdk/zulu-8.0.25-darwin", - "/jdk/zulu-8.0.25-windows", - "/jdk/zulu-8.0.252-darwin", - "/jdk/zulu-8.0.252-linux", - "/jdk/zulu-8.0.252-linux-aarch64", - "/jdk/zulu-8.0.252-windows", - "/jdk/zulu-8.0.262-darwin", - "/jdk/zulu-8.0.262-linux", - "/jdk/zulu-8.0.262-linux-aarch64", - "/jdk/zulu-8.0.262-windows", - "/jdk/zulu-8.0.265-darwin", - "/jdk/zulu-8.0.265-linux", - "/jdk/zulu-8.0.265-linux-aarch64", - "/jdk/zulu-8.0.265-windows", - "/jdk/zulu-8.0.272-darwin", - "/jdk/zulu-8.0.272-linux", - "/jdk/zulu-8.0.272-windows", - "/jdk/zulu-8.0.275-darwin", - "/jdk/zulu-8.0.275-linux", - "/jdk/zulu-8.0.275-windows", - "/jdk/zulu-8.0.282-darwin", - "/jdk/zulu-8.0.282-darwin-aarch64", - "/jdk/zulu-8.0.282-linux", - "/jdk/zulu-8.0.282-windows", - "/jdk/zulu-8.0.292-darwin", - "/jdk/zulu-8.0.292-darwin-aarch64", - "/jdk/zulu-8.0.292-linux", - "/jdk/zulu-8.0.292-windows", - "/jdk/zulu-8.0.302-darwin", - "/jdk/zulu-8.0.302-darwin-aarch64", - "/jdk/zulu-8.0.302-linux", - "/jdk/zulu-8.0.302-windows", - "/jdk/zulu-8.0.31-darwin", - "/jdk/zulu-8.0.31-windows", - "/jdk/zulu-8.0.312-darwin", - "/jdk/zulu-8.0.312-darwin-aarch64", - "/jdk/zulu-8.0.312-linux", - "/jdk/zulu-8.0.312-windows", - "/jdk/zulu-8.0.322-darwin", - "/jdk/zulu-8.0.322-darwin-aarch64", - "/jdk/zulu-8.0.322-linux", - "/jdk/zulu-8.0.322-windows", - "/jdk/zulu-8.0.332-darwin", - "/jdk/zulu-8.0.332-darwin-aarch64", - "/jdk/zulu-8.0.332-linux", - "/jdk/zulu-8.0.332-windows", - "/jdk/zulu-8.0.342-darwin", - "/jdk/zulu-8.0.342-darwin-aarch64", - "/jdk/zulu-8.0.342-linux", - "/jdk/zulu-8.0.342-windows", - "/jdk/zulu-8.0.345-darwin", - "/jdk/zulu-8.0.345-darwin-aarch64", - "/jdk/zulu-8.0.345-linux", - "/jdk/zulu-8.0.345-windows", - "/jdk/zulu-8.0.352-darwin", - "/jdk/zulu-8.0.352-darwin-aarch64", - "/jdk/zulu-8.0.352-linux", - "/jdk/zulu-8.0.352-windows", - "/jdk/zulu-8.0.362-darwin", - "/jdk/zulu-8.0.362-darwin-aarch64", - "/jdk/zulu-8.0.362-linux", - "/jdk/zulu-8.0.362-windows", - "/jdk/zulu-8.0.372-darwin", - "/jdk/zulu-8.0.372-darwin-aarch64", - "/jdk/zulu-8.0.372-linux", - "/jdk/zulu-8.0.372-windows", - "/jdk/zulu-8.0.382-darwin", - "/jdk/zulu-8.0.382-darwin-aarch64", - "/jdk/zulu-8.0.382-linux", - "/jdk/zulu-8.0.382-windows", - "/jdk/zulu-8.0.392-darwin", - "/jdk/zulu-8.0.392-darwin-aarch64", - "/jdk/zulu-8.0.392-linux", - "/jdk/zulu-8.0.392-linux-aarch64", - "/jdk/zulu-8.0.392-windows", - "/jdk/zulu-8.0.40-windows", - "/jdk/zulu-8.0.402-darwin", - "/jdk/zulu-8.0.402-darwin-aarch64", - "/jdk/zulu-8.0.402-linux", - "/jdk/zulu-8.0.402-windows", - "/jdk/zulu-8.0.412-darwin", - "/jdk/zulu-8.0.412-darwin-aarch64", - "/jdk/zulu-8.0.412-linux", - 
"/jdk/zulu-8.0.412-linux-aarch64", - "/jdk/zulu-8.0.412-windows", - "/jdk/zulu-8.0.45-darwin", - "/jdk/zulu-8.0.45-windows", - "/jdk/zulu-8.0.5-windows", - "/jdk/zulu-8.0.51-darwin", - "/jdk/zulu-8.0.51-windows", - "/jdk/zulu-8.0.60-darwin", - "/jdk/zulu-8.0.60-windows", - "/jdk/zulu-8.0.65-darwin", - "/jdk/zulu-8.0.65-windows", - "/jdk/zulu-8.0.66-darwin", - "/jdk/zulu-8.0.66-windows", - "/jdk/zulu-8.0.71-darwin", - "/jdk/zulu-8.0.71-linux", - "/jdk/zulu-8.0.71-windows", - "/jdk/zulu-8.0.72-darwin", - "/jdk/zulu-8.0.72-linux", - "/jdk/zulu-8.0.72-windows", - "/jdk/zulu-8.0.91-darwin", - "/jdk/zulu-8.0.91-linux", - "/jdk/zulu-8.0.91-windows", - "/jdk/zulu-8.0.92-darwin", - "/jdk/zulu-8.0.92-linux", - "/jdk/zulu-8.0.92-windows", - "/jdk/zulu-9.0.0-darwin", - "/jdk/zulu-9.0.0-linux", - "/jdk/zulu-9.0.0-windows", - "/jdk/zulu-9.0.1-darwin", - "/jdk/zulu-9.0.1-linux", - "/jdk/zulu-9.0.1-windows", - "/jdk/zulu-9.0.4-darwin", - "/jdk/zulu-9.0.4-linux", - "/jdk/zulu-9.0.4-windows", - "/jdk/zulu-9.0.7-darwin", - "/jdk/zulu-9.0.7-linux", - "/jdk/zulu-9.0.7-windows", - "/jdk/latest_adoptiumjdk_11_darwin", - "/jdk/latest_adoptiumjdk_11_darwin_aarch64", - "/jdk/latest_adoptiumjdk_11_linux", - "/jdk/latest_adoptiumjdk_11_linux_aarch64", - "/jdk/latest_adoptiumjdk_11_windows", - "/jdk/latest_adoptiumjdk_11_windows_x86_32", - "/jdk/latest_adoptiumjdk_16_darwin", - "/jdk/latest_adoptiumjdk_16_linux", - "/jdk/latest_adoptiumjdk_16_linux_aarch64", - "/jdk/latest_adoptiumjdk_16_windows", - "/jdk/latest_adoptiumjdk_16_windows_x86_32", - "/jdk/latest_adoptiumjdk_17_darwin", - "/jdk/latest_adoptiumjdk_17_darwin_aarch64", - "/jdk/latest_adoptiumjdk_17_linux", - "/jdk/latest_adoptiumjdk_17_linux_aarch64", - "/jdk/latest_adoptiumjdk_17_windows", - "/jdk/latest_adoptiumjdk_17_windows_x86_32", - "/jdk/latest_adoptiumjdk_21_darwin", - "/jdk/latest_adoptiumjdk_21_darwin_aarch64", - "/jdk/latest_adoptiumjdk_21_linux", - "/jdk/latest_adoptiumjdk_21_linux_aarch64", - "/jdk/latest_adoptiumjdk_21_windows", - "/jdk/latest_adoptiumjdk_8_darwin", - "/jdk/latest_adoptiumjdk_8_linux", - "/jdk/latest_adoptiumjdk_8_linux_aarch64", - "/jdk/latest_adoptiumjdk_8_windows", - "/jdk/latest_adoptiumjdk_8_windows_x86_32", - "/jdk/latest_adoptopenjdk-openj9_11_linux", - "/jdk/latest_adoptopenjdk-openj9_11_windows", - "/jdk/latest_adoptopenjdk-openj9_8_linux", - "/jdk/latest_adoptopenjdk-openj9_8_windows", - "/jdk/latest_adoptopenjdk_11_darwin", - "/jdk/latest_adoptopenjdk_11_linux", - "/jdk/latest_adoptopenjdk_11_linux_aarch64", - "/jdk/latest_adoptopenjdk_11_windows", - "/jdk/latest_adoptopenjdk_11_windows_x86_32", - "/jdk/latest_adoptopenjdk_12_darwin", - "/jdk/latest_adoptopenjdk_12_linux", - "/jdk/latest_adoptopenjdk_12_linux_aarch64", - "/jdk/latest_adoptopenjdk_12_windows", - "/jdk/latest_adoptopenjdk_12_windows_x86_32", - "/jdk/latest_adoptopenjdk_13_darwin", - "/jdk/latest_adoptopenjdk_13_linux", - "/jdk/latest_adoptopenjdk_13_linux_aarch64", - "/jdk/latest_adoptopenjdk_13_windows", - "/jdk/latest_adoptopenjdk_13_windows_x86_32", - "/jdk/latest_adoptopenjdk_14_darwin", - "/jdk/latest_adoptopenjdk_14_linux", - "/jdk/latest_adoptopenjdk_14_linux_aarch64", - "/jdk/latest_adoptopenjdk_14_windows", - "/jdk/latest_adoptopenjdk_14_windows_x86_32", - "/jdk/latest_adoptopenjdk_15_darwin", - "/jdk/latest_adoptopenjdk_15_linux", - "/jdk/latest_adoptopenjdk_15_linux_aarch64", - "/jdk/latest_adoptopenjdk_15_windows", - "/jdk/latest_adoptopenjdk_15_windows_x86_32", - "/jdk/latest_adoptopenjdk_16_darwin", - "/jdk/latest_adoptopenjdk_16_linux", - 
"/jdk/latest_adoptopenjdk_16_linux_aarch64", - "/jdk/latest_adoptopenjdk_16_windows", - "/jdk/latest_adoptopenjdk_16_windows_x86_32", - "/jdk/latest_adoptopenjdk_8_darwin", - "/jdk/latest_adoptopenjdk_8_linux", - "/jdk/latest_adoptopenjdk_8_linux_aarch64", - "/jdk/latest_adoptopenjdk_8_windows", - "/jdk/latest_adoptopenjdk_8_windows_x86_32", - "/jdk/latest_amazon-corretto_11_darwin", - "/jdk/latest_amazon-corretto_11_darwin_aarch64", - "/jdk/latest_amazon-corretto_11_linux", - "/jdk/latest_amazon-corretto_11_linux_aarch64", - "/jdk/latest_amazon-corretto_11_windows", - "/jdk/latest_amazon-corretto_17_darwin", - "/jdk/latest_amazon-corretto_17_darwin_aarch64", - "/jdk/latest_amazon-corretto_17_linux", - "/jdk/latest_amazon-corretto_17_linux_aarch64", - "/jdk/latest_amazon-corretto_17_windows", - "/jdk/latest_amazon-corretto_21_darwin", - "/jdk/latest_amazon-corretto_21_darwin_aarch64", - "/jdk/latest_amazon-corretto_21_linux", - "/jdk/latest_amazon-corretto_21_linux_aarch64", - "/jdk/latest_amazon-corretto_21_windows", - "/jdk/latest_amazon-corretto_8_darwin", - "/jdk/latest_amazon-corretto_8_darwin_aarch64", - "/jdk/latest_amazon-corretto_8_linux", - "/jdk/latest_amazon-corretto_8_linux_aarch64", - "/jdk/latest_amazon-corretto_8_windows", - "/jdk/latest_graalvm-ce_11_darwin", - "/jdk/latest_graalvm-ce_11_darwin_aarch64", - "/jdk/latest_graalvm-ce_11_linux", - "/jdk/latest_graalvm-ce_11_linux_aarch64", - "/jdk/latest_graalvm-ce_11_windows", - "/jdk/latest_graalvm-ce_17_darwin", - "/jdk/latest_graalvm-ce_17_darwin_aarch64", - "/jdk/latest_graalvm-ce_17_linux", - "/jdk/latest_graalvm-ce_17_linux_aarch64", - "/jdk/latest_graalvm-ce_17_windows", - "/jdk/latest_ibm_8_linux", - "/jdk/latest_openjdk_10_darwin", - "/jdk/latest_openjdk_10_linux", - "/jdk/latest_openjdk_10_windows", - "/jdk/latest_openjdk_11_darwin", - "/jdk/latest_openjdk_11_linux", - "/jdk/latest_openjdk_11_windows", - "/jdk/latest_openjdk_12_darwin", - "/jdk/latest_openjdk_12_linux", - "/jdk/latest_openjdk_12_windows", - "/jdk/latest_openjdk_13_darwin", - "/jdk/latest_openjdk_13_linux", - "/jdk/latest_openjdk_13_windows", - "/jdk/latest_openjdk_14_darwin", - "/jdk/latest_openjdk_14_linux", - "/jdk/latest_openjdk_14_windows", - "/jdk/latest_openjdk_15_darwin", - "/jdk/latest_openjdk_15_linux", - "/jdk/latest_openjdk_15_linux_aarch64", - "/jdk/latest_openjdk_15_windows", - "/jdk/latest_openjdk_16_darwin", - "/jdk/latest_openjdk_16_linux", - "/jdk/latest_openjdk_16_linux_aarch64", - "/jdk/latest_openjdk_16_windows", - "/jdk/latest_openjdk_17_darwin", - "/jdk/latest_openjdk_17_darwin_aarch64", - "/jdk/latest_openjdk_17_linux", - "/jdk/latest_openjdk_17_linux_aarch64", - "/jdk/latest_openjdk_17_windows", - "/jdk/latest_openjdk_18_darwin", - "/jdk/latest_openjdk_18_darwin_aarch64", - "/jdk/latest_openjdk_18_linux", - "/jdk/latest_openjdk_18_linux_aarch64", - "/jdk/latest_openjdk_18_windows", - "/jdk/latest_openjdk_19_darwin", - "/jdk/latest_openjdk_19_darwin_aarch64", - "/jdk/latest_openjdk_19_linux", - "/jdk/latest_openjdk_19_linux_aarch64", - "/jdk/latest_openjdk_19_windows", - "/jdk/latest_openjdk_20_darwin", - "/jdk/latest_openjdk_20_darwin_aarch64", - "/jdk/latest_openjdk_20_linux", - "/jdk/latest_openjdk_20_linux_aarch64", - "/jdk/latest_openjdk_20_windows", - "/jdk/latest_openjdk_21_darwin", - "/jdk/latest_openjdk_21_darwin_aarch64", - "/jdk/latest_openjdk_21_linux", - "/jdk/latest_openjdk_21_linux_aarch64", - "/jdk/latest_openjdk_21_windows", - "/jdk/latest_openjdk_22_darwin", - "/jdk/latest_openjdk_22_darwin_aarch64", - 
"/jdk/latest_openjdk_22_linux", - "/jdk/latest_openjdk_22_linux_aarch64", - "/jdk/latest_openjdk_22_windows", - "/jdk/latest_openjdk_23_darwin", - "/jdk/latest_openjdk_23_darwin_aarch64", - "/jdk/latest_openjdk_23_linux", - "/jdk/latest_openjdk_23_linux_aarch64", - "/jdk/latest_openjdk_23_windows", - "/jdk/latest_openjdk_9_darwin", - "/jdk/latest_openjdk_9_linux", - "/jdk/latest_openjdk_9_windows", - "/jdk/latest_oracle_10_darwin", - "/jdk/latest_oracle_10_linux", - "/jdk/latest_oracle_10_windows", - "/jdk/latest_oracle_11_darwin", - "/jdk/latest_oracle_11_linux", - "/jdk/latest_oracle_11_linux_aarch64", - "/jdk/latest_oracle_11_windows", - "/jdk/latest_oracle_12_darwin", - "/jdk/latest_oracle_12_linux", - "/jdk/latest_oracle_12_windows", - "/jdk/latest_oracle_13_darwin", - "/jdk/latest_oracle_13_linux", - "/jdk/latest_oracle_13_windows", - "/jdk/latest_oracle_16_darwin", - "/jdk/latest_oracle_16_linux", - "/jdk/latest_oracle_16_linux_aarch64", - "/jdk/latest_oracle_16_windows", - "/jdk/latest_oracle_7_darwin", - "/jdk/latest_oracle_7_linux", - "/jdk/latest_oracle_7_windows", - "/jdk/latest_oracle_8_darwin", - "/jdk/latest_oracle_8_linux", - "/jdk/latest_oracle_8_linux_aarch64", - "/jdk/latest_oracle_8_linux_x86_32", - "/jdk/latest_oracle_8_windows", - "/jdk/latest_oracle_8_windows_x86_32", - "/jdk/latest_oracle_9_darwin", - "/jdk/latest_oracle_9_linux", - "/jdk/latest_oracle_9_windows", - "/jdk/latest_sap_8_linux", - "/jdk/latest_zulu_10_darwin", - "/jdk/latest_zulu_10_linux", - "/jdk/latest_zulu_10_windows", - "/jdk/latest_zulu_11_darwin", - "/jdk/latest_zulu_11_darwin_aarch64", - "/jdk/latest_zulu_11_linux", - "/jdk/latest_zulu_11_linux_aarch64", - "/jdk/latest_zulu_11_windows", - "/jdk/latest_zulu_12_darwin", - "/jdk/latest_zulu_12_linux", - "/jdk/latest_zulu_12_windows", - "/jdk/latest_zulu_13_darwin", - "/jdk/latest_zulu_13_darwin_aarch64", - "/jdk/latest_zulu_13_linux", - "/jdk/latest_zulu_13_linux_aarch64", - "/jdk/latest_zulu_13_windows", - "/jdk/latest_zulu_14_darwin", - "/jdk/latest_zulu_14_linux", - "/jdk/latest_zulu_14_windows", - "/jdk/latest_zulu_15_darwin", - "/jdk/latest_zulu_15_darwin_aarch64", - "/jdk/latest_zulu_15_linux", - "/jdk/latest_zulu_15_linux_aarch64", - "/jdk/latest_zulu_15_windows", - "/jdk/latest_zulu_16_darwin", - "/jdk/latest_zulu_16_darwin_aarch64", - "/jdk/latest_zulu_16_linux", - "/jdk/latest_zulu_16_linux_aarch64", - "/jdk/latest_zulu_16_windows", - "/jdk/latest_zulu_16_windows_aarch64", - "/jdk/latest_zulu_17_darwin", - "/jdk/latest_zulu_17_darwin_aarch64", - "/jdk/latest_zulu_17_linux", - "/jdk/latest_zulu_17_linux_aarch64", - "/jdk/latest_zulu_17_windows", - "/jdk/latest_zulu_17_windows_aarch64", - "/jdk/latest_zulu_18_darwin", - "/jdk/latest_zulu_18_darwin_aarch64", - "/jdk/latest_zulu_18_linux", - "/jdk/latest_zulu_18_linux_aarch64", - "/jdk/latest_zulu_18_windows", - "/jdk/latest_zulu_18_windows_aarch64", - "/jdk/latest_zulu_19_darwin", - "/jdk/latest_zulu_19_darwin_aarch64", - "/jdk/latest_zulu_19_linux", - "/jdk/latest_zulu_19_linux_aarch64", - "/jdk/latest_zulu_19_windows", - "/jdk/latest_zulu_19_windows_aarch64", - "/jdk/latest_zulu_1_linux_aarch64", - "/jdk/latest_zulu_20_darwin", - "/jdk/latest_zulu_20_darwin_aarch64", - "/jdk/latest_zulu_20_linux", - "/jdk/latest_zulu_20_linux_aarch64", - "/jdk/latest_zulu_20_windows", - "/jdk/latest_zulu_21_darwin", - "/jdk/latest_zulu_21_darwin_aarch64", - "/jdk/latest_zulu_21_linux", - "/jdk/latest_zulu_21_linux_aarch64", - "/jdk/latest_zulu_21_windows", - "/jdk/latest_zulu_21_windows_aarch64", - 
"/jdk/latest_zulu_22_darwin", - "/jdk/latest_zulu_22_darwin_aarch64", - "/jdk/latest_zulu_22_linux", - "/jdk/latest_zulu_22_linux_aarch64", - "/jdk/latest_zulu_22_windows", - "/jdk/latest_zulu_6_linux", - "/jdk/latest_zulu_6_windows", - "/jdk/latest_zulu_7_darwin", - "/jdk/latest_zulu_7_linux", - "/jdk/latest_zulu_7_windows", - "/jdk/latest_zulu_8_darwin", - "/jdk/latest_zulu_8_darwin_aarch64", - "/jdk/latest_zulu_8_linux", - "/jdk/latest_zulu_8_linux_aarch64", - "/jdk/latest_zulu_8_windows", - "/jdk/latest_zulu_9_darwin", - "/jdk/latest_zulu_9_linux", - "/jdk/latest_zulu_9_windows" - ] -} diff --git a/list-plain-deps.groovy b/list-plain-deps.groovy deleted file mode 100644 index e7a863d9a8cee..0000000000000 --- a/list-plain-deps.groovy +++ /dev/null @@ -1,68 +0,0 @@ -import java.nio.file.* -import java.nio.charset.StandardCharsets -import java.util.regex.Pattern - -def parseGradleFiles(Path directory) { - List configurations = ['api', - 'implementation', - "testImplementation", - "testRuntimeOnly", - "runtimeOnly"] - - def configsRexex = configurations.join('|') - def pattern = Pattern.compile(/(\w+)\s+['"](\w[^'"]+):([^'"]+):([^'"]+)['"]/) - def dependencies = [] - - Files.walk(directory).each { path -> - if (Files.isRegularFile(path) && path.toString().endsWith('.gradle')) { - def lines = Files.readAllLines(path, StandardCharsets.UTF_8) - lines.each { line -> - def matcher = pattern.matcher(line) - if (matcher.find()) { - def configuration = matcher.group(1) - def group = matcher.group(2) - def name = matcher.group(3) - def version = matcher.group(4) - dependencies << [file: path.toString(), configuration: configuration, group: group, name: name, version: version] - } - } - } - } - return dependencies -} - -String convertToVersionCatalogEntry(def dependencies) { - Set versions = new TreeSet<>() - Set entries = new TreeSet<>() - -} - -def main() { - // def directoryPath = System.console().readLine('Enter the directory path to search for *.gradle files: ').trim() - // def directory = Paths.get("directoryPath") - def directory = Paths.get("/Users/rene/dev/elastic/elasticsearch") - - if (!Files.exists(directory) || !Files.isDirectory(directory)) { - println "The directory '${directoryPath}' does not exist or is not a directory." - return - } - - def dependencies = parseGradleFiles(directory) - if (dependencies) { - def depsByFile = dependencies.groupBy {it.file} - depsByFile.each { file, deps -> - println "File: ${file}" - deps.each { dep -> - println "${dep.configuration} '${dep.group}:${dep.name}:${dep.version}'" - } - println "" - } - - println "Found ${dependencies.size()} dependencies in ${depsByFile.size()} files." - - } else { - println "No dependencies found." 
- } -} - -main() \ No newline at end of file diff --git a/versions.log b/versions.log deleted file mode 100644 index 10a88d7d1172c..0000000000000 --- a/versions.log +++ /dev/null @@ -1,595 +0,0 @@ -Loaded version property: protobuf = 3.21.9 -Loaded version property: junit5 = 5.7.1 -Loaded version property: commons_lang3 = 3.9 -Loaded version property: jmh = 1.26 -Loaded version property: reflections = 0.10.2 -Loaded version property: lucene = 9.11.1 -Loaded version property: dockerJava = 3.3.4 -Loaded version property: opensaml = 4.3.0 -Loaded version property: commonslogging = 1.2 -Loaded version property: bouncycastle = 1.78.1 -Loaded version property: jackson = 2.15.0 -Loaded version property: elasticsearch = 8.15.0 -Loaded version property: testcontainer = 1.19.2 -Loaded version property: commonscodec = 1.15 -Loaded version property: jna = 5.12.1 -Loaded version property: jimfs = 1.3.0 -Loaded version property: netty = 4.1.109.Final -Loaded version property: log4j = 2.19.0 -Loaded version property: spatial4j = 0.7 -Loaded version property: junit = 4.13.2 -Loaded version property: commonsCompress = 1.24.0 -Loaded version property: slf4j = 2.0.6 -Loaded version property: bundled_jdk_vendor = openjdk -Loaded version property: icu4j = 68.2 -Loaded version property: jts = 1.15.0 -Loaded version property: supercsv = 2.4.0 -Loaded version property: randomizedrunner = 2.8.0 -Loaded version property: httpasyncclient = 4.1.5 -Loaded version property: google_oauth_client = 1.34.1 -Loaded version property: ductTape = 1.0.8 -Loaded version property: antlr4 = 4.13.1 -Loaded version property: jimfs_guava = 32.1.1-jre -Loaded version property: mocksocket = 1.2 -Loaded version property: bundled_jdk = 22.0.1+8@c7ec1332f7bb44aeba2eb341ae18aca4 -Loaded version property: networknt_json_schema_validator = 1.0.48 -Loaded version property: hamcrest = 2.1 -Loaded version property: ecsLogging = 1.2.0 -Loaded version property: snakeyaml = 2.0 -Loaded version property: httpclient = 4.5.14 -Loaded version property: httpcore = 4.4.13 -Version Properties: false -File: /Users/rene/dev/elastic/elasticsearch/test/framework/build.gradle -Resolving version: ${versions.randomizedrunner} -"com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" -> "[group:com.carrotsearch.randomizedtesting, name:randomizedtesting-runner, version:randomizedtesting-runner]" -Resolving version: ${versions.junit} -"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" -Resolving version: ${versions.hamcrest} -"org.hamcrest:hamcrest:${versions.hamcrest}" -> "[group:org.hamcrest, name:hamcrest, version:hamcrest]" -Resolving version: ${versions.lucene} -"org.apache.lucene:lucene-test-framework:${versions.lucene}" -> "[group:org.apache.lucene, name:lucene-test-framework, version:lucene-test-framework]" -Resolving version: ${versions.lucene} -"org.apache.lucene:lucene-codecs:${versions.lucene}" -> "[group:org.apache.lucene, name:lucene-codecs, version:lucene-codecs]" -Resolving version: ${versions.commonslogging} -"commons-logging:commons-logging:${versions.commonslogging}" -> "[group:commons-logging, name:commons-logging, version:commons-logging]" -Resolving version: ${versions.commonscodec} -"commons-codec:commons-codec:${versions.commonscodec}" -> "[group:commons-codec, name:commons-codec, version:commons-codec]" -Resolving version: 5.11.0 -"org.mockito:mockito-core:5.11.0" -> "[group:org.mockito, name:mockito-core, version:mockito-core]" -Resolving version: 5.11.0 
-"org.mockito:mockito-subclass:5.11.0" -> "[group:org.mockito, name:mockito-subclass, version:mockito-subclass]" -Resolving version: 1.14.12 -"net.bytebuddy:byte-buddy:1.14.12" -> "[group:net.bytebuddy, name:byte-buddy, version:byte-buddy]" -Resolving version: 3.3 -"org.objenesis:objenesis:3.3" -> "[group:org.objenesis, name:objenesis, version:objenesis]" -Resolving version: ${versions.mocksocket} -"org.elasticsearch:mocksocket:${versions.mocksocket}" -> "[group:org.elasticsearch, name:mocksocket, version:mocksocket]" - -File: /Users/rene/dev/elastic/elasticsearch/test/test-clusters/build.gradle -Resolving version: ${versions.junit} -existingMajor: 4, newMajor: 4 -"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" -Resolving version: ${versions.log4j} -"org.apache.logging.log4j:log4j-api:${versions.log4j}" -> "[group:org.apache.logging.log4j, name:log4j-api, version:log4j-api]" -Resolving version: ${versions.jackson} -"com.fasterxml.jackson.core:jackson-core:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-core, version:jackson-core]" -Resolving version: ${versions.jackson} -"com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-annotations, version:jackson-annotations]" -Resolving version: ${versions.jackson} -"com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-databind, version:jackson-databind]" - -File: /Users/rene/dev/elastic/elasticsearch/test/immutable-collections-patch/build.gradle -Resolving version: 9.7 -"org.ow2.asm:asm:9.7" -> "[group:org.ow2.asm, name:asm, version:asm]" -Resolving version: 9.7 -"org.ow2.asm:asm-tree:9.7" -> "[group:org.ow2.asm, name:asm-tree, version:asm-tree]" - -File: /Users/rene/dev/elastic/elasticsearch/test/logger-usage/build.gradle -Resolving version: 9.7 -existingMajor: 9, newMajor: 9 -"org.ow2.asm:asm:9.7" -> "[group:org.ow2.asm, name:asm, version:asm]" -Resolving version: 9.7 -existingMajor: 9, newMajor: 9 -"org.ow2.asm:asm-tree:9.7" -> "[group:org.ow2.asm, name:asm-tree, version:asm-tree]" -Resolving version: 9.7 -"org.ow2.asm:asm-analysis:9.7" -> "[group:org.ow2.asm, name:asm-analysis, version:asm-analysis]" -Resolving version: ${versions.log4j} -existingMajor: 2, newMajor: 2 -"org.apache.logging.log4j:log4j-api:${versions.log4j}" -> "[group:org.apache.logging.log4j, name:log4j-api, version:log4j-api]" - -File: /Users/rene/dev/elastic/elasticsearch/test/fixtures/testcontainer-utils/build.gradle -Resolving version: ${versions.junit} -existingMajor: 4, newMajor: 4 -"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" -Resolving version: ${versions.testcontainer} -"org.testcontainers:testcontainers:${versions.testcontainer}" -> "[group:org.testcontainers, name:testcontainers, version:testcontainers]" -Resolving version: ${versions.randomizedrunner} -existingMajor: 2, newMajor: 2 -"com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" -> "[group:com.carrotsearch.randomizedtesting, name:randomizedtesting-runner, version:randomizedtesting-runner]" -Resolving version: ${versions.dockerJava} -"com.github.docker-java:docker-java-api:${versions.dockerJava}" -> "[group:com.github.docker-java, name:docker-java-api, version:docker-java-api]" -Resolving version: ${versions.slf4j} -"org.slf4j:slf4j-api:${versions.slf4j}" -> "[group:org.slf4j, name:slf4j-api, version:slf4j-api]" -Resolving version: ${versions.dockerJava} 
-"com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" -> "[group:com.github.docker-java, name:docker-java-transport-zerodep, version:docker-java-transport-zerodep]" -Resolving version: ${versions.dockerJava} -"com.github.docker-java:docker-java-transport:${versions.dockerJava}" -> "[group:com.github.docker-java, name:docker-java-transport, version:docker-java-transport]" -Resolving version: ${versions.dockerJava} -"com.github.docker-java:docker-java-core:${versions.dockerJava}" -> "[group:com.github.docker-java, name:docker-java-core, version:docker-java-core]" -Resolving version: ${versions.commonsCompress} -"org.apache.commons:commons-compress:${versions.commonsCompress}" -> "[group:org.apache.commons, name:commons-compress, version:commons-compress]" -Resolving version: ${versions.ductTape} -"org.rnorth.duct-tape:duct-tape:${versions.ductTape}" -> "[group:org.rnorth.duct-tape, name:duct-tape, version:duct-tape]" -Resolving version: ${versions.jackson} -existingMajor: 2, newMajor: 2 -"com.fasterxml.jackson.core:jackson-core:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-core, version:jackson-core]" -Resolving version: ${versions.jackson} -existingMajor: 2, newMajor: 2 -"com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-annotations, version:jackson-annotations]" - -File: /Users/rene/dev/elastic/elasticsearch/test/fixtures/old-elasticsearch/build.gradle -Resolving version: ${versions.lucene} -"org.apache.lucene:lucene-core:${versions.lucene}" -> "[group:org.apache.lucene, name:lucene-core, version:lucene-core]" - -File: /Users/rene/dev/elastic/elasticsearch/test/fixtures/krb5kdc-fixture/build.gradle -Resolving version: ${versions.junit} -existingMajor: 4, newMajor: 4 -"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" -Resolving version: ${versions.slf4j} -existingMajor: 2, newMajor: 2 -"org.slf4j:slf4j-api:${versions.slf4j}" -> "[group:org.slf4j, name:slf4j-api, version:slf4j-api]" -Resolving version: ${versions.dockerJava} -existingMajor: 3, newMajor: 3 -"com.github.docker-java:docker-java-api:${versions.dockerJava}" -> "[group:com.github.docker-java, name:docker-java-api, version:docker-java-api]" -Resolving version: ${versions.jackson} -existingMajor: 2, newMajor: 2 -"com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-annotations, version:jackson-annotations]" -Resolving version: ${versions.slf4j} -"org.slf4j:slf4j-simple:${versions.slf4j}" -> "[group:org.slf4j, name:slf4j-simple, version:slf4j-simple]" -Resolving version: ${versions.hamcrest} -existingMajor: 2, newMajor: 2 -"org.hamcrest:hamcrest:${versions.hamcrest}" -> "[group:org.hamcrest, name:hamcrest, version:hamcrest]" - -File: /Users/rene/dev/elastic/elasticsearch/test/fixtures/minio-fixture/build.gradle -Resolving version: ${versions.junit} -existingMajor: 4, newMajor: 4 -"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" -Resolving version: ${versions.slf4j} -existingMajor: 2, newMajor: 2 -"org.slf4j:slf4j-simple:${versions.slf4j}" -> "[group:org.slf4j, name:slf4j-simple, version:slf4j-simple]" - -File: /Users/rene/dev/elastic/elasticsearch/test/fixtures/hdfs-fixture/build.gradle -Resolving version: ${versions.junit} -existingMajor: 4, newMajor: 4 -"junit:junit:${versions.junit}" -> "[group:junit, name:junit, version:junit]" -Resolving version: 2.8.5 
-"org.apache.hadoop:hadoop-minicluster:2.8.5" -> "[group:org.apache.hadoop, name:hadoop-minicluster, version:hadoop-minicluster]" -Resolving version: 3.3.1 -existingMajor: 2, newMajor: 3 -"org.apache.hadoop:hadoop-minicluster:3.3.1" -> "[group:org.apache.hadoop, name:hadoop-minicluster, version:hadoop-minicluster3]" - -File: /Users/rene/dev/elastic/elasticsearch/test/x-content/build.gradle -Resolving version: ${versions.jackson} -existingMajor: 2, newMajor: 2 -"com.fasterxml.jackson.core:jackson-core:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-core, version:jackson-core]" -Resolving version: ${versions.networknt_json_schema_validator} -"com.networknt:json-schema-validator:${versions.networknt_json_schema_validator}" -> "[group:com.networknt, name:json-schema-validator, version:json-schema-validator]" -Resolving version: ${versions.jackson} -existingMajor: 2, newMajor: 2 -"com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-databind, version:jackson-databind]" -Resolving version: ${versions.jackson} -existingMajor: 2, newMajor: 2 -"com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" -> "[group:com.fasterxml.jackson.core, name:jackson-annotations, version:jackson-annotations]" -Resolving version: 1.26.1 -existingMajor: 1, newMajor: 1 -"org.apache.commons:commons-compress:1.26.1" -> "[group:org.apache.commons, name:commons-compress, version:commons-compress]" -Resolving version: 2.15.1 -"commons-io:commons-io:2.15.1" -> "[group:commons-io, name:commons-io, version:commons-io]" -Resolving version: ${versions.commons_lang3} -"org.apache.commons:commons-lang3:${versions.commons_lang3}" -> "[group:org.apache.commons, name:commons-lang3, version:commons-lang3]" - -libraries Catalog versions -randomizedtesting-runner = [group:com.carrotsearch.randomizedtesting, name:randomizedtesting-runner, version:randomizedtesting-runner] -junit = [group:junit, name:junit, version:junit] -hamcrest = [group:org.hamcrest, name:hamcrest, version:hamcrest] -lucene-test-framework = [group:org.apache.lucene, name:lucene-test-framework, version:lucene-test-framework] -lucene-codecs = [group:org.apache.lucene, name:lucene-codecs, version:lucene-codecs] -commons-logging = [group:commons-logging, name:commons-logging, version:commons-logging] -commons-codec = [group:commons-codec, name:commons-codec, version:commons-codec] -mockito-core = [group:org.mockito, name:mockito-core, version:mockito-core] -mockito-subclass = [group:org.mockito, name:mockito-subclass, version:mockito-subclass] -byte-buddy = [group:net.bytebuddy, name:byte-buddy, version:byte-buddy] -objenesis = [group:org.objenesis, name:objenesis, version:objenesis] -mocksocket = [group:org.elasticsearch, name:mocksocket, version:mocksocket] -log4j-api = [group:org.apache.logging.log4j, name:log4j-api, version:log4j-api] -jackson-core = [group:com.fasterxml.jackson.core, name:jackson-core, version:jackson-core] -jackson-annotations = [group:com.fasterxml.jackson.core, name:jackson-annotations, version:jackson-annotations] -jackson-databind = [group:com.fasterxml.jackson.core, name:jackson-databind, version:jackson-databind] -asm = [group:org.ow2.asm, name:asm, version:asm] -asm-tree = [group:org.ow2.asm, name:asm-tree, version:asm-tree] -asm-analysis = [group:org.ow2.asm, name:asm-analysis, version:asm-analysis] -testcontainers = [group:org.testcontainers, name:testcontainers, version:testcontainers] -docker-java-api = [group:com.github.docker-java, 
name:docker-java-api, version:docker-java-api] -slf4j-api = [group:org.slf4j, name:slf4j-api, version:slf4j-api] -docker-java-transport-zerodep = [group:com.github.docker-java, name:docker-java-transport-zerodep, version:docker-java-transport-zerodep] -docker-java-transport = [group:com.github.docker-java, name:docker-java-transport, version:docker-java-transport] -docker-java-core = [group:com.github.docker-java, name:docker-java-core, version:docker-java-core] -commons-compress = [group:org.apache.commons, name:commons-compress, version:commons-compress] -duct-tape = [group:org.rnorth.duct-tape, name:duct-tape, version:duct-tape] -lucene-core = [group:org.apache.lucene, name:lucene-core, version:lucene-core] -slf4j-simple = [group:org.slf4j, name:slf4j-simple, version:slf4j-simple] -hadoop-minicluster = [group:org.apache.hadoop, name:hadoop-minicluster, version:hadoop-minicluster] -hadoop-minicluster3 = [group:org.apache.hadoop, name:hadoop-minicluster, version:hadoop-minicluster3] -json-schema-validator = [group:com.networknt, name:json-schema-validator, version:json-schema-validator] -commons-io = [group:commons-io, name:commons-io, version:commons-io] -commons-lang3 = [group:org.apache.commons, name:commons-lang3, version:commons-lang3] -Version Catalog libraries -randomizedtesting-runner = 2.8.0 -junit = 4.13.2 -hamcrest = 2.1 -lucene-test-framework = 9.11.1 -lucene-codecs = 9.11.1 -commons-logging = 1.2 -commons-codec = 1.15 -mockito-core = 5.11.0 -mockito-subclass = 5.11.0 -byte-buddy = 1.14.12 -objenesis = 3.3 -mocksocket = 1.2 -log4j-api = 2.19.0 -jackson-core = 2.15.0 -jackson-annotations = 2.15.0 -jackson-databind = 2.15.0 -asm = 9.7 -asm-tree = 9.7 -asm-analysis = 9.7 -testcontainers = 1.19.2 -docker-java-api = 3.3.4 -slf4j-api = 2.0.6 -docker-java-transport-zerodep = 3.3.4 -docker-java-transport = 3.3.4 -docker-java-core = 3.3.4 -commons-compress = 1.26.1 -duct-tape = 1.0.8 -lucene-core = 9.11.1 -slf4j-simple = 2.0.6 -hadoop-minicluster = 2.8.5 -hadoop-minicluster3 = 3.3.1 -json-schema-validator = 1.0.48 -commons-io = 2.15.1 -commons-lang3 = 3.9 -Found 54 dependencies in 10 files. 
-randomizedtesting-runner -> randomizedtestingrunner -null -junit -> junit -4.13.2 -hamcrest -> hamcrest -2.1 -lucene-codecs -> lucene -null -lucene-core -> lucene -9.11.1 -lucene-test-framework -> lucene -9.11.1 -commons-logging -> commonslogging -null -commons-codec -> commonscodec -null -mockito-subclass -> mockito -null -mockito-core -> mockito -5.11.0 -byte-buddy -> bytebuddy -null -mocksocket -> mocksocket -1.2 -log4j-api -> log4j -null -jackson-annotations -> jackson -null -jackson-databind -> jackson -2.15.0 -jackson-core -> jackson -2.15.0 -asm-tree -> asm -9.7 -asm-analysis -> asm -9.7 -docker-java-transport-zerodep -> docker -null -docker-java-transport -> docker -3.3.4 -docker-java-core -> docker -3.3.4 -docker-java-api -> docker -3.3.4 -slf4j-simple -> slf4j -null -slf4j-api -> slf4j -2.0.6 -commons-compress -> commonscompress -null -duct-tape -> ducttape -null -hadoop-minicluster -> hadoopminicluster -null -hadoop-minicluster3 -> hadoopminicluster3 -null -json-schema-validator -> jsonschemavalidator -null -commons-io -> commonsio -null -commons-lang3 -> commonslang3 -null - - -versions: -asm = "9.7" -bytebuddy = "1.14.12" -commonscodec = "1.15" -commonscompress = "1.26.1" -commonsio = "2.15.1" -commonslang3 = "3.9" -commonslogging = "1.2" -docker = "3.3.4" -ducttape = "1.0.8" -hadoopminicluster = "2.8.5" -hadoopminicluster3 = "3.3.1" -jackson = "2.15.0" -jsonschemavalidator = "1.0.48" -log4j = "2.19.0" -lucene = "9.11.1" -mockito = "5.11.0" -objenesis = "3.3" -randomizedtestingrunner = "2.8.0" -slf4j = "2.0.6" -testcontainers = "1.19.2" - - -libraries: -asm = { group = "org.ow2.asm", name = "asm", version.ref = "asm" } -asm-analysis = { group = "org.ow2.asm", name = "asm-analysis", version.ref = "asm" } -asm-tree = { group = "org.ow2.asm", name = "asm-tree", version.ref = "asm" } -byte-buddy = { group = "net.bytebuddy", name = "byte-buddy", version.ref = "bytebuddy" } -commons-codec = { group = "commons-codec", name = "commons-codec", version.ref = "commonscodec" } -commons-compress = { group = "org.apache.commons", name = "commons-compress", version.ref = "commonscompress" } -commons-io = { group = "commons-io", name = "commons-io", version.ref = "commonsio" } -commons-lang3 = { group = "org.apache.commons", name = "commons-lang3", version.ref = "commonslang3" } -commons-logging = { group = "commons-logging", name = "commons-logging", version.ref = "commonslogging" } -docker-java-api = { group = "com.github.docker-java", name = "docker-java-api", version.ref = "docker" } -docker-java-core = { group = "com.github.docker-java", name = "docker-java-core", version.ref = "docker" } -docker-java-transport = { group = "com.github.docker-java", name = "docker-java-transport", version.ref = "docker" } -docker-java-transport-zerodep = { group = "com.github.docker-java", name = "docker-java-transport-zerodep", version.ref = "docker" } -duct-tape = { group = "org.rnorth.duct-tape", name = "duct-tape", version.ref = "ducttape" } -hadoop-minicluster = { group = "org.apache.hadoop", name = "hadoop-minicluster", version.ref = "hadoopminicluster" } -hadoop-minicluster3 = { group = "org.apache.hadoop", name = "hadoop-minicluster", version.ref = "hadoopminicluster3" } -hamcrest = { group = "org.hamcrest", name = "hamcrest", version.ref = "hamcrest" } -jackson-annotations = { group = "com.fasterxml.jackson.core", name = "jackson-annotations", version.ref = "jackson" } -jackson-core = { group = "com.fasterxml.jackson.core", name = "jackson-core", version.ref = "jackson" } -jackson-databind = { 
group = "com.fasterxml.jackson.core", name = "jackson-databind", version.ref = "jackson" } -json-schema-validator = { group = "com.networknt", name = "json-schema-validator", version.ref = "jsonschemavalidator" } -junit = { group = "junit", name = "junit", version.ref = "junit" } -log4j-api = { group = "org.apache.logging.log4j", name = "log4j-api", version.ref = "log4j" } -lucene-codecs = { group = "org.apache.lucene", name = "lucene-codecs", version.ref = "lucene" } -lucene-core = { group = "org.apache.lucene", name = "lucene-core", version.ref = "lucene" } -lucene-test-framework = { group = "org.apache.lucene", name = "lucene-test-framework", version.ref = "lucene" } -mockito-core = { group = "org.mockito", name = "mockito-core", version.ref = "mockito" } -mockito-subclass = { group = "org.mockito", name = "mockito-subclass", version.ref = "mockito" } -mocksocket = { group = "org.elasticsearch", name = "mocksocket", version.ref = "mocksocket" } -objenesis = { group = "org.objenesis", name = "objenesis", version.ref = "objenesis" } -randomizedtesting-runner = { group = "com.carrotsearch.randomizedtesting", name = "randomizedtesting-runner", version.ref = "randomizedtestingrunner" } -slf4j-api = { group = "org.slf4j", name = "slf4j-api", version.ref = "slf4j" } -slf4j-simple = { group = "org.slf4j", name = "slf4j-simple", version.ref = "slf4j" } -testcontainers = { group = "org.testcontainers", name = "testcontainers", version.ref = "testcontainers" } - - -Final versions -antlr4 = "4.13.1" -asm = "9.7" -aws = "1.12.270" -azure = "12.20.1" -azureCommon = "12.19.1" -azureCore = "1.34.0" -azureCoreHttpNetty = "1.12.7" -azureJackson = "2.15.4" -azureJacksonDatabind = "2.13.4.2" -bytebuddy = "1.14.12" -commonscodec = "1.15" -commonscompress = "1.26.1" -commonsio = "2.15.1" -commonslang3 = "3.9" -commonslogging = "1.2" -docker = "3.3.4" -ducttape = "1.0.8" -ecsLogging = "1.2.0" -google_oauth_client = "1.34.1" -hadoopminicluster = "2.8.5" -hadoopminicluster3 = "3.3.1" -hamcrest = "2.1" -httpcore = "4.4.13" -icu4j = "68.2" -jackson = "2.15.0" -jakartaActivation = "1.2.1" -jakartaXMLBind = "2.3.2" -jmh = "1.26" -jna = "5.12.1" -jsonschemavalidator = "1.0.48" -jts = "1.15.0" -junit = "4.13.2" -junit5 = "5.8.1" -log4j = "2.19.0" -log4japi = "2.19.0" -lucene = "9.11.1" -mockito = "5.11.0" -mocksocket = "1.2" -netty = "4.1.109.Final" -objenesis = "3.3" -opentelemetry = "1.31.0" -protobuf = "3.21.9" -randomizedtestingrunner = "2.8.0" -reactiveStreams = "1.0.4" -reactorCore = "3.4.34" -reactorNetty = "1.0.39" -slf4j = "2.0.6" -spatial4j = "0.7" -stax2API = "4.2.1" -testcontainers = "1.19.2" -woodstox = "6.4.0" - - -[libraries] -antlr4-runtime = { group = "org.antlr", name = "antlr4-runtime", version.ref = "antlr4" } -api-common = { group = "com.google.api", name = "api-common", version = "2.3.1" } -apm-agent = "co.elastic.apm:elastic-apm-agent:1.44.0" -asm = { group = "org.ow2.asm", name = "asm", version.ref = "asm" } -asm-analysis = { group = "org.ow2.asm", name = "asm-analysis", version.ref = "asm" } -asm-commons = { group = "org.ow2.asm", name = "asm-commons", version.ref = "asm" } -asm-tree = { group = "org.ow2.asm", name = "asm-tree", version.ref = "asm" } -asm-util = { group = "org.ow2.asm", name = "asm-util", version.ref = "asm" } -aws-jmespath-java = { group = "com.amazonaws", name = "jmespath-java", version.ref = "aws" } -aws-java-sdk-s3 = { group = "com.amazonaws", name = "aws-java-sdk-s3", version.ref = "aws" } -aws-java-sdk-core = { group = "com.amazonaws", name = "aws-java-sdk-core", 
version.ref = "aws" } -aws-java-sdk-sts = { group = "com.amazonaws", name = "aws-java-sdk-sts", version.ref = "aws" } -azure-core = { group = "com.azure", name = "azure-core", version.ref = "azureCore" } -azure-core-http-netty = { group = "com.azure", name = "azure-core-http-netty", version.ref = "azureCoreHttpNetty" } -azure-jackson-core = { group = "com.fasterxml.jackson.core", name = "jackson-core", version.ref = "azureJackson" } -azure-jackson-databind = { group = "com.fasterxml.jackson.core", name = "jackson-databind", version.ref = "azureJacksonDatabind" } -azure-jackson-Annotations = { group = "com.fasterxml.jackson.core", name = "jackson-annotations", version.ref = "azureJackson" } -azure-jackson-dataformat-xml = { group = "com.fasterxml.jackson.dataformat", name = "jackson-dataformat-xml", version.ref = "azureJackson" } -azure-jackson-datatype-jsr310 = { group = "com.fasterxml.jackson.datatype", name = "jackson-datatype-jsr310", version.ref = "azureJackson" } -azure-jackson-module-jaxb-annotations = { group = "com.fasterxml.jackson.module", name = "jackson-module-jaxb-annotations", version.ref = "azureJackson" } -azure-storage-blob = { group = "com.azure", name = "azure-storage-blob", version.ref = "azure" } -azure-storage-common = { group = "com.azure", name = "azure-storage-common", version.ref = "azureCommon" } -bc-fips = "org.bouncycastle:bc-fips:1.0.2.4" -bcpg-fips = "org.bouncycastle:bcpg-fips:1.0.7.1" -byte-buddy = { group = "net.bytebuddy", name = "byte-buddy", version.ref = "bytebuddy" } -commons-codec = { group = "commons-codec", name = "commons-codec", version.ref = "commonscodec" } -commons-compress = { group = "org.apache.commons", name = "commons-compress", version.ref = "commonscompress" } -commons-logging = { group = "commons-logging", name = "commons-logging", version.ref = "commonslogging" } -commons-math3 = "org.apache.commons:commons-math3:3.2" -commons-io = { group = "commons-io", name = "commons-io", version.ref = "commonsio" } -ecs-logging-core = { group = "co.elastic.logging", name = "ecs-logging-core", version.ref = "ecsLogging" } -gax = { group = "com.google.api", name = "gax", version = "2.20.1" } -gax-httpjson = { group = "com.google.api", name = "gax-httpjson", version = "0.105.1" } -geoip2 = "com.maxmind.geoip2:geoip2:4.2.0" -geolite2-datbase = "org.elasticsearch:geolite2-databases:20191119" -google-api-client = { group = "com.google.api-client", name = "google-api-client", version = "2.1.1" } -google-api-services-storage = { group = "com.google.apis", name = "google-api-services-storage", version = "v1-rev20220705-2.0.0" } -google-auth-library-credentials = { group = "com.google.auth", name = "google-auth-library-credentials", version = "1.11.0" } -google-auth-library-credentials-oauth2-http = { group = "com.google.auth", name = "google-auth-library-oauth2-http", version = "1.11.0" } -google-cloud-core = { group = "com.google.cloud", name = "google-cloud-core", version = "2.8.28" } -google-cloud-core-http = { group = "com.google.cloud", name = "google-cloud-core-http", version = "2.8.28" } -google-cloud-storage = { group = "com.google.cloud", name = "google-cloud-storage", version = "2.13.1" } -google-http-client = { group = "com.google.http-client", name = "google-http-client", version = "1.42.3" } -google-http-client-appengine = { group = "com.google.http-client", name = "google-http-client-appengine", version = "1.42.3" } -google-http-client-jackson2 = { group = "com.google.http-client", name = "google-http-client-jackson2", version = "1.42.3" } 
-google-http-client-json = { group = "com.google.http-client", name = "google-http-client-gson", version = "1.42.3" } -google-oauth-client = { group = "com.google.oauth-client", name = "google-oauth-client", version.ref = "google_oauth_client" } -grpc-context = { group = "io.grpc", name = "grpc-context", version = "1.49.2" } -gson = { group = "com.google.code.gson", name = "gson", version = "2.10" } -guava = { group = "com.google.guava", name = "guava", version = "32.0.1-jre" } -guava-failureaccess = { group = "com.google.guava", name = "failureaccess", version = "1.0.1" } -hamcrest = { group = "org.hamcrest", name = "hamcrest", version.ref = "hamcrest" } -hppc = "com.carrotsearch:hppc:0.8.1" -hdrhistogram = "org.hdrhistogram:HdrHistogram:2.1.9" -httpasyncclient = { group = "org.apache.httpcomponents", name = "httpasyncclient", version = "4.1.5" } -httpclient = { group = "org.apache.httpcomponents", name = "httpclient", version = "4.5.14" } -httpcore = { group = "org.apache.httpcomponents", name = "httpcore", version.ref = "httpcore" } -httpcore-nio = { group = "org.apache.httpcomponents", name = "httpcore-nio", version.ref = "httpcore" } -icu4j = { group = "com.ibm.icu", name = "icu4j", version.ref = "icu4j" } -jackson-core = { group = "com.fasterxml.jackson.core", name = "jackson-core", version.ref = "jackson" } -jackson-annotations = { group = "com.fasterxml.jackson.core", name = "jackson-annotations", version.ref = "jackson" } -jackson-databind = { group = "com.fasterxml.jackson.core", name = "jackson-databind", version.ref = "jackson" } -jackson-dataformat-smile = { group = "com.fasterxml.jackson.dataformat", name = "jackson-dataformat-smile", version.ref = "jackson" } -jackson-dataformat-yaml = { group = "com.fasterxml.jackson.dataformat", name = "jackson-dataformat-yaml", version.ref = "jackson" } -jackson-dataformat-cbor = { group = "com.fasterxml.jackson.dataformat", name = "jackson-dataformat-cbor", version.ref = "jackson" } -jakarta-activation-api = { group = "jakarta.activation", name = "jakarta.activation-api", version.ref = "jakartaActivation" } -jakarta-xml-bind-api = { group = "jakarta.xml.bind", name = "jakarta.xml.bind-api", version.ref = "jakartaXMLBind" } -jansi = "org.fusesource.jansi:jansi:2.4.0" -jaxb-api = "javax.xml.bind:jaxb-api:2.2.2" -jcodings = { group = "org.jruby.jcodings", name = "jcodings", version = "1.0.44" } -jimfs = { group = "com.google.jimfs", name = "jimfs", version = "1.3.0" } -jimfs-guava = { group = "com.google.guava", name = "guava", version = "32.1.1-jre" } -jmh-core = { group = "org.openjdk.jmh", name = "jmh-core", version.ref = "jmh" } -jmh-generator-annprocess = { group = "org.openjdk.jmh", name = "jmh-generator-annprocess", version.ref = "jmh" } -jna = { group = "net.java.dev.jna", name = "jna", version.ref = "jna" } -joda-time = "joda-time:joda-time:2.10.14" -joni = { group = "org.jruby.joni", name = "joni", version = "2.1.29" } -jopt-simple = "net.sf.jopt-simple:jopt-simple:5.0.2" -jsr305 = "com.google.code.findbugs:jsr305:3.0.2" -jts-core = { group = "org.locationtech.jts", name = "jts-core", version.ref = "jts" } -junit = { group = "junit", name = "junit", version.ref = "junit" } -junit5-jupiter-api = { group = "org.junit.jupiter", name = "junit-jupiter-api", version.ref = "junit5" } -log4j12-api = { group = "org.apache.logging.log4j", name = "log4j-1.2-api", version.ref = "log4j" } -log4j2-ecs-layout = { group = "co.elastic.logging", name = "log4j2-ecs-layout", version.ref = "ecsLogging" } -log4j-api = { group = 
"org.apache.logging.log4j", name = "log4j-api", version.ref = "log4j" } -log4j-core = { group = "org.apache.logging.log4j", name = "log4j-core", version.ref = "log4j" } -lucene-core = { group = "org.apache.lucene", name = "lucene-core", version.ref = "lucene" } -lucene-analysis-common = { group = "org.apache.lucene", name = "lucene-analysis-common", version.ref = "lucene" } -lucene-analysis-icu = { group = "org.apache.lucene", name = "lucene-analysis-icu", version.ref = "lucene" } -lucene-analysis-kuromoji = { group = "org.apache.lucene", name = "lucene-analysis-kuromoji", version.ref = "lucene" } -lucene-analysis-morfologik = { group = "org.apache.lucene", name = "lucene-analysis-morfologik", version.ref = "lucene" } -lucene-analysis-nori = { group = "org.apache.lucene", name = "lucene-analysis-nori", version.ref = "lucene" } -lucene-analysis-phonetic = { group = "org.apache.lucene", name = "lucene-analysis-phonetic", version.ref = "lucene" } -lucene-analysis-smartcn = { group = "org.apache.lucene", name = "lucene-analysis-smartcn", version.ref = "lucene" } -lucene-analysis-stempel = { group = "org.apache.lucene", name = "lucene-analysis-stempel", version.ref = "lucene" } -lucene-backward-codecs = { group = "org.apache.lucene", name = "lucene-backward-codecs", version.ref = "lucene" } -lucene-codecs = { group = "org.apache.lucene", name = "lucene-codecs", version.ref = "lucene" } -lucene-expressions = { group = "org.apache.lucene", name = "lucene-expressions", version.ref = "lucene" } -lucene-highlighter = { group = "org.apache.lucene", name = "lucene-highlighter", version.ref = "lucene" } -lucene-grouping = { group = "org.apache.lucene", name = "lucene-grouping", version.ref = "lucene" } -lucene-join = { group = "org.apache.lucene", name = "lucene-join", version.ref = "lucene" } -lucene-memory = { group = "org.apache.lucene", name = "lucene-memory", version.ref = "lucene" } -lucene-misc = { group = "org.apache.lucene", name = "lucene-misc", version.ref = "lucene" } -lucene-queries = { group = "org.apache.lucene", name = "lucene-queries", version.ref = "lucene" } -lucene-queryparser = { group = "org.apache.lucene", name = "lucene-queryparser", version.ref = "lucene" } -lucene-sandbox = { group = "org.apache.lucene", name = "lucene-sandbox", version.ref = "lucene" } -lucene-suggest = { group = "org.apache.lucene", name = "lucene-suggest", version.ref = "lucene" } -lucene-spatial3d = { group = "org.apache.lucene", name = "lucene-spatial3d", version.ref = "lucene" } -lucene-spatial-extras = { group = "org.apache.lucene", name = "lucene-spatial-extras", version.ref = "lucene" } -lz4-java = { group = "org.lz4", name = "lz4-java", version = "1.8.0" } -maxmind-db = "com.maxmind.db:maxmind-db:3.1.0" -mockito-core = { group = "org.mockito", name = "mockito-core", version.ref = "mockito" } -mockito-subclass = { group = "org.mockito", name = "mockito-subclass", version.ref = "mockito" } -mocksocket = { group = "org.elasticsearch", name = "mocksocket", version.ref = "mocksocket" } -morfologik-stemming = "org.carrot2:morfologik-stemming:2.1.1" -morfologik-fsa = "org.carrot2:morfologik-fsa:2.1.1" -morfologik-ukrainian-search = "ua.net.nlp:morfologik-ukrainian-search:3.7.5" -mustache-compiler = "com.github.spullara.mustache.java:compiler:0.9.10" -netty-buffer = { group = "io.netty", name = "netty-buffer", version.ref = "netty" } -netty-codec = { group = "io.netty", name = "netty-codec", version.ref = "netty" } -netty-codec-dns = { group = "io.netty", name = "netty-codec-dns", version.ref = "netty" } 
-netty-codec-http = { group = "io.netty", name = "netty-codec-http", version.ref = "netty" } -netty-codec-http2 = { group = "io.netty", name = "netty-codec-http2", version.ref = "netty" } -netty-codec-socks = { group = "io.netty", name = "netty-codec-socks", version.ref = "netty" } -netty-common = { group = "io.netty", name = "netty-common", version.ref = "netty" } -netty-handler = { group = "io.netty", name = "netty-handler", version.ref = "netty" } -netty-handler-proxy = { group = "io.netty", name = "netty-handler-proxy", version.ref = "netty" } -netty-resolver = { group = "io.netty", name = "netty-resolver", version.ref = "netty" } -netty-resolver-dns = { group = "io.netty", name = "netty-resolver-dns", version.ref = "netty" } -netty-transport = { group = "io.netty", name = "netty-transport", version.ref = "netty" } -netty-transport-native-unix-common = { group = "io.netty", name = "netty-transport-native-unix-common", version.ref = "netty" } -objenesis = { group = "org.objenesis", name = "objenesis", version.ref = "objenesis" } -opencensus-api = { group = "io.opencensus", name = "opencensus-api", version = "0.31.1" } -opencensus-contrib-http-util = { group = "io.opencensus", name = "opencensus-contrib-http-util", version = "0.31.1" } -opentelemetry-api = { group = "io.opentelemetry", name = "opentelemetry-api", version.ref = "opentelemetry" } -opentelemetry-context = { group = "io.opentelemetry", name = "opentelemetry-context", version.ref = "opentelemetry" } -opentelemetry-semconv = { group = "io.opentelemetry", name = "opentelemetry-semconv", version = "1.21.0-alpha" } -proto-google-common-protos = { group = "com.google.api.grpc", name = "proto-google-common-protos", version = "2.9.6" } -proto-google-iam-v1 = { group = "com.google.api.grpc", name = "proto-google-iam-v1", version = "1.6.2" } -protobuf-java = { group = "com.google.protobuf", name = "protobuf-java", version.ref = "protobuf" } -protobuf-java-util = { group = "com.google.protobuf", name = "protobuf-java-util", version.ref = "protobuf" } -randomizedtesting-runner = { group = "com.carrotsearch.randomizedtesting", name = "randomizedtesting-runner", version.ref = "randomizedtestingrunner" } -reactor-netty-core = { group = "io.projectreactor.netty", name = "reactor-netty-core", version.ref = "reactorNetty" } -reactor-netty-http = { group = "io.projectreactor.netty", name = "reactor-netty-http", version.ref = "reactorNetty" } -reactor-core = { group = "io.projectreactor", name = "reactor-core", version.ref = "reactorCore" } -reactive-streams = { group = "org.reactivestreams", name = "reactive-streams", version.ref = "reactiveStreams" } -s2-geometry-library-java = { group = "io.sgr", name = "s2-geometry-library-java", version = "1.0.1" } -slf4j-api = { group = "org.slf4j", name = "slf4j-api", version.ref = "slf4j" } -slf4j-nop = { group = "org.slf4j", name = "slf4j-nop", version.ref = "slf4j" } -slf4j-simple = { group = "org.slf4j", name = "slf4j-simple", version.ref = "slf4j" } -snakeyaml = "org.yaml:snakeyaml:2.0" -spatial4j = { group = "org.locationtech.spatial4j", name = "spatial4j", version.ref = "spatial4j" } -stax2-api = { group = "org.codehaus.woodstox", name = "stax2-api", version.ref = "stax2API" } -threetenbp = { group = "org.threeten", name = "threetenbp", version = "1.6.5" } -woodstox-core = { group = "com.fasterxml.woodstox", name = "woodstox-core", version.ref = "woodstox" } From 2009bc7adff623cf10d8b563d3510eb13c048c8a Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Fri, 19 Jul 2024 15:51:54 -0400 Subject: [PATCH 
091/406] Fixing EnterpriseGeoIpDownloaderIT (#111080) (#111113) Co-authored-by: Keith Massey --- .../geoip/EnterpriseGeoIpDownloaderIT.java | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java index d9665e180d960..2d068373717d8 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java @@ -105,13 +105,21 @@ public void testEnterpriseDownloaderTask() throws Exception { startEnterpriseGeoIpDownloaderTask(); configureDatabase(DATABASE_TYPE); createGeoIpPipeline(pipelineName, DATABASE_TYPE, sourceField, targetField); - String documentId = ingestDocument(indexName, pipelineName, sourceField); - GetResponse getResponse = client().get(new GetRequest(indexName, documentId)).actionGet(); - Map returnedSource = getResponse.getSource(); - assertNotNull(returnedSource); - Object targetFieldValue = returnedSource.get(targetField); - assertNotNull(targetFieldValue); - assertThat(((Map) targetFieldValue).get("organization_name"), equalTo("Bredband2 AB")); + + assertBusy(() -> { + /* + * We know that the .geoip_databases index has been populated, but we don't know for sure that the database has been pulled + * down and made available on all nodes. So we run this ingest-and-check step in an assertBusy. + */ + logger.info("Ingesting a test document"); + String documentId = ingestDocument(indexName, pipelineName, sourceField); + GetResponse getResponse = client().get(new GetRequest(indexName, documentId)).actionGet(); + Map returnedSource = getResponse.getSource(); + assertNotNull(returnedSource); + Object targetFieldValue = returnedSource.get(targetField); + assertNotNull(targetFieldValue); + assertThat(((Map) targetFieldValue).get("organization_name"), equalTo("Bredband2 AB")); + }); } private void startEnterpriseGeoIpDownloaderTask() { From 0937b4ec463338d046abcc228cbdc76cd0e3fb90 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 20 Jul 2024 09:31:43 +1000 Subject: [PATCH 092/406] Mute org.elasticsearch.packaging.test.DockerTests test021InstallPlugin #110343 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index ffe2e078938d9..50c38fef1543a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -84,6 +84,9 @@ tests: - class: org.elasticsearch.preallocate.PreallocateTests method: testPreallocate issue: https://github.com/elastic/elasticsearch/issues/110948 +- class: org.elasticsearch.packaging.test.DockerTests + method: test021InstallPlugin + issue: https://github.com/elastic/elasticsearch/issues/110343 # Examples: # From fffdcbe4d18c2a0822c29d5544b24b5886545d70 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Mon, 22 Jul 2024 14:25:16 +1000 Subject: [PATCH 093/406] Mute org.elasticsearch.multi_node.RollupIT org.elasticsearch.multi_node.RollupIT #111142 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 50c38fef1543a..f477636fceda1 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -87,6 +87,8 @@ tests: - 
class: org.elasticsearch.packaging.test.DockerTests method: test021InstallPlugin issue: https://github.com/elastic/elasticsearch/issues/110343 +- class: org.elasticsearch.multi_node.RollupIT + issue: https://github.com/elastic/elasticsearch/issues/111142 # Examples: # From 4a705e4d3b57cea32a895eb77ec1e236aba4d04a Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Mon, 22 Jul 2024 14:25:25 +1000 Subject: [PATCH 094/406] Mute org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT #111124 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f477636fceda1..492cc6ef71984 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -89,6 +89,8 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/110343 - class: org.elasticsearch.multi_node.RollupIT issue: https://github.com/elastic/elasticsearch/issues/111142 +- class: org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT + issue: https://github.com/elastic/elasticsearch/issues/111124 # Examples: # From e551c743ccae545b14ea090c07568420d97d443c Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Mon, 22 Jul 2024 11:07:34 +0300 Subject: [PATCH 095/406] Add comma before charset parameter in `WWW-Authenticate` response header (#110906) (#111144) Add comma before charset parameter in WWW-Authenticate response header, according to RFC 7617. Co-authored-by: Philippus Baalman --- docs/changelog/110906.yaml | 5 +++++ .../security/authc/DefaultAuthenticationFailureHandler.java | 2 +- .../org/elasticsearch/xpack/core/security/authc/Realm.java | 2 +- .../xpack/core/security/support/Exceptions.java | 4 ++-- .../authc/DefaultAuthenticationFailureHandlerTests.java | 6 +++--- .../xpack/core/security/test/SecurityAssertions.java | 2 +- .../org/elasticsearch/xpack/security/SecurityTests.java | 4 ++-- .../xpack/security/authc/AuthenticationServiceTests.java | 4 ++-- .../elasticsearch/xpack/sql/client/RemoteFailureTests.java | 2 +- .../src/test/resources/remote_failure/missing_auth.json | 4 ++-- 10 files changed, 20 insertions(+), 15 deletions(-) create mode 100644 docs/changelog/110906.yaml diff --git a/docs/changelog/110906.yaml b/docs/changelog/110906.yaml new file mode 100644 index 0000000000000..6123b1108fd17 --- /dev/null +++ b/docs/changelog/110906.yaml @@ -0,0 +1,5 @@ +pr: 110906 +summary: "Add comma before charset parameter in WWW-Authenticate response header" +area: Authentication +type: bug +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java index b5469fadd95b6..c9b30a826248a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java @@ -43,7 +43,7 @@ public DefaultAuthenticationFailureHandler(final Map> failu if (failureResponseHeaders == null || failureResponseHeaders.isEmpty()) { this.defaultFailureResponseHeaders = Collections.singletonMap( "WWW-Authenticate", - Collections.singletonList("Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\"") + Collections.singletonList("Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\"") ); } else { 
this.defaultFailureResponseHeaders = Collections.unmodifiableMap( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java index 63989ee86b3a0..bce9e6255a037 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java @@ -69,7 +69,7 @@ public int order() { public Map> getAuthenticationFailureHeaders() { return Collections.singletonMap( "WWW-Authenticate", - Collections.singletonList("Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\"") + Collections.singletonList("Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\"") ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Exceptions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Exceptions.java index 37f1dce5af7ba..b323e7ef20171 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Exceptions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Exceptions.java @@ -18,13 +18,13 @@ private Exceptions() {} public static ElasticsearchSecurityException authenticationError(String msg, Throwable cause, Object... args) { ElasticsearchSecurityException e = new ElasticsearchSecurityException(msg, RestStatus.UNAUTHORIZED, cause, args); - e.addHeader("WWW-Authenticate", "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""); + e.addHeader("WWW-Authenticate", "Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\""); return e; } public static ElasticsearchSecurityException authenticationError(String msg, Object... 
args) { ElasticsearchSecurityException e = new ElasticsearchSecurityException(msg, RestStatus.UNAUTHORIZED, args); - e.addHeader("WWW-Authenticate", "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""); + e.addHeader("WWW-Authenticate", "Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\""); return e; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java index 46cb1b8e66930..b8b3087fd72b8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java @@ -33,7 +33,7 @@ public class DefaultAuthenticationFailureHandlerTests extends ESTestCase { public void testAuthenticationRequired() { final boolean testDefault = randomBoolean(); - final String basicAuthScheme = "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""; + final String basicAuthScheme = "Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\""; final String bearerAuthScheme = "Bearer realm=\"" + XPackField.SECURITY + "\""; final DefaultAuthenticationFailureHandler failureHandler; if (testDefault) { @@ -69,7 +69,7 @@ public void testMissingToken() { } public void testExceptionProcessingRequest() { - final String basicAuthScheme = "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""; + final String basicAuthScheme = "Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\""; final String bearerAuthScheme = "Bearer realm=\"" + XPackField.SECURITY + "\""; final String negotiateAuthScheme = randomFrom("Negotiate", "Negotiate Ijoijksdk"); final Map> failureResponseHeaders = new HashMap<>(); @@ -134,7 +134,7 @@ public void testExceptionProcessingRequest() { } public void testSortsWWWAuthenticateHeaderValues() { - final String basicAuthScheme = "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""; + final String basicAuthScheme = "Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\""; final String bearerAuthScheme = "Bearer realm=\"" + XPackField.SECURITY + "\""; final String negotiateAuthScheme = randomFrom("Negotiate", "Negotiate Ijoijksdk"); final String apiKeyAuthScheme = "ApiKey"; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/SecurityAssertions.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/SecurityAssertions.java index 3b40b96d26e10..cb989a970332d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/SecurityAssertions.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/SecurityAssertions.java @@ -22,6 +22,6 @@ public static void assertContainsWWWAuthenticateHeader(ElasticsearchSecurityExce assertThat(e.status(), is(RestStatus.UNAUTHORIZED)); assertThat(e.getHeaderKeys(), hasSize(1)); assertThat(e.getHeader("WWW-Authenticate"), notNullValue()); - assertThat(e.getHeader("WWW-Authenticate"), contains("Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\"")); + assertThat(e.getHeader("WWW-Authenticate"), contains("Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\"")); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 1aa40a48ecc97..400bc35b93fd5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -748,7 +748,7 @@ public void testLicenseUpdateFailureHandlerUpdate() throws Exception { // On trial license, kerberos is allowed and the WWW-Authenticate response header should reflect that verifyHasAuthenticationHeaderValue( e, - "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\"", + "Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\"", "Negotiate", "ApiKey" ); @@ -760,7 +760,7 @@ public void testLicenseUpdateFailureHandlerUpdate() throws Exception { request.getHttpRequest(), ActionListener.wrap(result -> { assertTrue(completed.compareAndSet(false, true)); }, e -> { // On basic or gold license, kerberos is not allowed and the WWW-Authenticate response header should also reflect that - verifyHasAuthenticationHeaderValue(e, "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\"", "ApiKey"); + verifyHasAuthenticationHeaderValue(e, "Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\"", "ApiKey"); }) ); if (completed.get()) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 62b72b4f9750c..85a1dc1aa029d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -1500,7 +1500,7 @@ public void testRealmAuthenticateTerminateAuthenticationProcessWithException() { final boolean throwElasticsearchSecurityException = randomBoolean(); final boolean withAuthenticateHeader = throwElasticsearchSecurityException && randomBoolean(); Exception throwE = new Exception("general authentication error"); - final String basicScheme = "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""; + final String basicScheme = "Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\""; String selectedScheme = randomFrom(basicScheme, "Negotiate IOJoj"); if (throwElasticsearchSecurityException) { throwE = new ElasticsearchSecurityException("authentication error", RestStatus.UNAUTHORIZED); @@ -1547,7 +1547,7 @@ public void testRealmAuthenticateGracefulTerminateAuthenticationProcess() { when(token.principal()).thenReturn(principal); when(firstRealm.token(threadContext)).thenReturn(token); when(firstRealm.supports(token)).thenReturn(true); - final String basicScheme = "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""; + final String basicScheme = "Basic realm=\"" + XPackField.SECURITY + "\", charset=\"UTF-8\""; mockAuthenticate(firstRealm, token, null, true); ElasticsearchSecurityException e = expectThrows( diff --git a/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/RemoteFailureTests.java b/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/RemoteFailureTests.java index d093332e48422..258a738a076c8 100644 --- a/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/RemoteFailureTests.java +++ b/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/RemoteFailureTests.java @@ 
-61,7 +61,7 @@ public void testParseMissingAuth() throws IOException { assertEquals("missing authentication token for REST request [/?pretty&error_trace]", failure.reason()); assertThat(failure.remoteTrace(), containsString("DefaultAuthenticationFailureHandler.missingToken")); assertNull(failure.cause()); - assertEquals(singletonMap("WWW-Authenticate", "Basic realm=\"security\" charset=\"UTF-8\""), failure.headers()); + assertEquals(singletonMap("WWW-Authenticate", "Basic realm=\"security\", charset=\"UTF-8\""), failure.headers()); } public void testNoError() { diff --git a/x-pack/plugin/sql/sql-client/src/test/resources/remote_failure/missing_auth.json b/x-pack/plugin/sql/sql-client/src/test/resources/remote_failure/missing_auth.json index 3d2927f85d6b1..d21fece75f7ac 100644 --- a/x-pack/plugin/sql/sql-client/src/test/resources/remote_failure/missing_auth.json +++ b/x-pack/plugin/sql/sql-client/src/test/resources/remote_failure/missing_auth.json @@ -5,7 +5,7 @@ "type" : "security_exception", "reason" : "missing authentication token for REST request [/?pretty&error_trace]", "header" : { - "WWW-Authenticate" : "Basic realm=\"security\" charset=\"UTF-8\"" + "WWW-Authenticate" : "Basic realm=\"security\", charset=\"UTF-8\"" }, "stack_trace" : "ElasticsearchSecurityException[missing authentication token for REST request [/?pretty&error_trace]]\n\tat org.elasticsearch.xpack.security.support.Exceptions.authenticationError(Exceptions.java:36)\n\tat org.elasticsearch.xpack.security.authc.DefaultAuthenticationFailureHandler.missingToken(DefaultAuthenticationFailureHandler.java:69)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$AuditableRestRequest.anonymousAccessDenied(AuthenticationService.java:603)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$handleNullToken$17(AuthenticationService.java:357)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.handleNullToken(AuthenticationService.java:362)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.consumeToken(AuthenticationService.java:277)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$extractToken$7(AuthenticationService.java:249)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.extractToken(AuthenticationService.java:266)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$null$0(AuthenticationService.java:201)\n\tat org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:59)\n\tat org.elasticsearch.xpack.security.authc.TokenService.getAndValidateToken(TokenService.java:230)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$authenticateAsync$2(AuthenticationService.java:197)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$lookForExistingAuthentication$4(AuthenticationService.java:228)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lookForExistingAuthentication(AuthenticationService.java:239)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.authenticateAsync(AuthenticationService.java:193)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.access$000(AuthenticationService.java:147)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService.authenticate(AuthenticationService.java:99)\n\tat 
org.elasticsearch.xpack.security.rest.SecurityRestFilter.handleRequest(SecurityRestFilter.java:69)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:80)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" } @@ -13,7 +13,7 @@ "type" : "security_exception", "reason" : "missing authentication token for REST request [/?pretty&error_trace]", "header" : { - "WWW-Authenticate" : "Basic realm=\"security\" charset=\"UTF-8\"" + "WWW-Authenticate" : "Basic realm=\"security\", charset=\"UTF-8\"" }, "stack_trace" : "ElasticsearchSecurityException[missing authentication token for REST request [/?pretty&error_trace]]\n\tat org.elasticsearch.xpack.security.support.Exceptions.authenticationError(Exceptions.java:36)\n\tat org.elasticsearch.xpack.security.authc.DefaultAuthenticationFailureHandler.missingToken(DefaultAuthenticationFailureHandler.java:69)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$AuditableRestRequest.anonymousAccessDenied(AuthenticationService.java:603)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$handleNullToken$17(AuthenticationService.java:357)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.handleNullToken(AuthenticationService.java:362)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.consumeToken(AuthenticationService.java:277)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$extractToken$7(AuthenticationService.java:249)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.extractToken(AuthenticationService.java:266)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$null$0(AuthenticationService.java:201)\n\tat org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:59)\n\tat org.elasticsearch.xpack.security.authc.TokenService.getAndValidateToken(TokenService.java:230)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$authenticateAsync$2(AuthenticationService.java:197)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$lookForExistingAuthentication$4(AuthenticationService.java:228)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lookForExistingAuthentication(AuthenticationService.java:239)\n\tat 
org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.authenticateAsync(AuthenticationService.java:193)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.access$000(AuthenticationService.java:147)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService.authenticate(AuthenticationService.java:99)\n\tat org.elasticsearch.xpack.security.rest.SecurityRestFilter.handleRequest(SecurityRestFilter.java:69)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:80)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" }, From 572e039bbcda956b53a99e413ba9d8a950e1c271 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 22 Jul 2024 11:45:49 +0100 Subject: [PATCH 096/406] Rework docs on logging levels (#111143) (#111150) Clarify that the default config is the recommended one, and that users should not normally enable `DEBUG` or `TRACE` logging without looking at the source code. Also reorders the information a bit for easier reading. --- docs/reference/setup/logging-config.asciidoc | 60 +++++++++++++++----- 1 file changed, 45 insertions(+), 15 deletions(-) diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index 7b36b6382c9bf..e382bbdacb464 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -140,19 +140,41 @@ documentation]. [[configuring-logging-levels]] === Configuring logging levels -Each Java package in the {es-repo}[{es} source code] has a related logger. For -example, the `org.elasticsearch.discovery` package has -`logger.org.elasticsearch.discovery` for logs related to the -<> process. - -To get more or less verbose logs, use the <> to change the related logger's log level. Each logger -accepts Log4j 2's built-in log levels, from least to most verbose: `OFF`, -`FATAL`, `ERROR`, `WARN`, `INFO`, `DEBUG`, and `TRACE`. The default log level is -`INFO`. Messages logged at higher verbosity levels (`DEBUG` and `TRACE`) are -only intended for expert use. To prevent leaking sensitive information in logs, -{es} will reject setting certain loggers to higher verbosity levels unless -<> is enabled. 
+Log4J 2 log messages include a _level_ field, which is one of the following (in +order of increasing verbosity): + +* `FATAL` +* `ERROR` +* `WARN` +* `INFO` +* `DEBUG` +* `TRACE` + +By default {es} includes all messages at levels `INFO`, `WARN`, `ERROR` and +`FATAL` in its logs, but filters out messages at levels `DEBUG` and `TRACE`. +This is the recommended configuration. Do not filter out messages at `INFO` or +higher log levels or else you may not be able to understand your cluster's +behaviour or troubleshoot common problems. Do not enable logging at levels +`DEBUG` or `TRACE` unless you are following instructions elsewhere in this +manual which call for more detailed logging, or you are an expert user who will +be reading the {es} source code to determine the meaning of the logs. + +Messages are logged by a hierarchy of loggers which matches the hierarchy of +Java packages and classes in the {es-repo}[{es} source code]. Every logger has +a corresponding <> which can be used +to control the verbosity of its logs. The setting's name is the fully-qualified +name of the package or class, prefixed with `logger.`. + +You may set each logger's verbosity to the name of a log level, for instance +`DEBUG`, which means that messages from this logger at levels up to the +specified one will be included in the logs. You may also use the value `OFF` to +suppress all messages from the logger. + +For example, the `org.elasticsearch.discovery` package contains functionality +related to the <> process, and you can +control the verbosity of its logs with the `logger.org.elasticsearch.discovery` +setting. To enable `DEBUG` logging for this package, use the +<> as follows: [source,console] ---- @@ -164,8 +186,8 @@ PUT /_cluster/settings } ---- -To reset a logger's verbosity to its default level, set the logger setting to -`null`: +To reset this package's log verbosity to its default level, set the logger +setting to `null`: [source,console] ---- @@ -211,6 +233,14 @@ formatting the same information in different ways, renaming the logger or adjusting the log level for specific messages. Do not rely on the contents of the application logs remaining precisely the same between versions. +NOTE: To prevent leaking sensitive information in logs, {es} suppresses certain +log messages by default even at the highest verbosity levels. To disable this +protection on a node, set the Java system property +`es.insecure_network_trace_enabled` to `true`. This feature is primarily +intended for test systems which do not contain any sensitive information. If you +set this property on a system which contains sensitive information, you must +protect your logs from unauthorized access. + [discrete] [[deprecation-logging]] === Deprecation logging From 7cdc1b0df0b92ca89fd30936c21854db40f2e521 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 22 Jul 2024 11:58:50 -0700 Subject: [PATCH 097/406] Unmute DockerTests.test600Interrupt (#111165) (#111168) Investigating https://github.com/elastic/elasticsearch/issues/111132 and it seems this test has been muted on `main` for some time. Let's unmute, to see if this is specific to the 7.17 branch or not. 
--- .../test/java/org/elasticsearch/packaging/test/DockerTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index f9723f30cc371..6c4f3b354c73e 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -1231,7 +1231,6 @@ public void test500Readiness() throws Exception { assertBusy(() -> assertTrue(readinessProbe(9399))); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99508") public void test600Interrupt() { waitForElasticsearch(installation, "elastic", PASSWORD); final Result containerLogs = getContainerLogs(); From 75c1703d7f6b5775bf7fc1d06242034d9b563e5c Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 22 Jul 2024 14:44:58 -0500 Subject: [PATCH 098/406] Avoid calling real maxmind endpoint from EnterpriseGeoIpDownloader (#111121) (#111171) --- .../geoip/EnterpriseGeoIpDownloaderIT.java | 18 +++----- .../geoip/EnterpriseGeoIpHttpFixture.java | 42 ++++++++----------- 2 files changed, 24 insertions(+), 36 deletions(-) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java index 2d068373717d8..cc757c413713d 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.core.Booleans; import org.elasticsearch.core.TimeValue; import org.elasticsearch.ingest.EnterpriseGeoIpTask; import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration; @@ -54,13 +53,12 @@ public class EnterpriseGeoIpDownloaderIT extends ESIntegTestCase { private static final String DATABASE_TYPE = "GeoIP2-City"; - private static final boolean useFixture = Booleans.parseBoolean(System.getProperty("geoip_use_service", "false")) == false; @ClassRule - public static final EnterpriseGeoIpHttpFixture fixture = new EnterpriseGeoIpHttpFixture(useFixture, DATABASE_TYPE); + public static final EnterpriseGeoIpHttpFixture fixture = new EnterpriseGeoIpHttpFixture(DATABASE_TYPE); protected String getEndpoint() { - return useFixture ? 
fixture.getAddress() : null; + return fixture.getAddress(); } @Override @@ -71,11 +69,9 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { builder.setSecureSettings(secureSettings) .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true); - if (getEndpoint() != null) { - // note: this is using the enterprise fixture for the regular downloader, too, as - // a slightly hacky way of making the regular downloader not actually download any files - builder.put(GeoIpDownloader.ENDPOINT_SETTING.getKey(), getEndpoint()); - } + // note: this is using the enterprise fixture for the regular downloader, too, as + // a slightly hacky way of making the regular downloader not actually download any files + builder.put(GeoIpDownloader.ENDPOINT_SETTING.getKey(), getEndpoint()); return builder.build(); } @@ -94,9 +90,7 @@ public void testEnterpriseDownloaderTask() throws Exception { * was updated with information from the database. * Note that the "enterprise database" is actually just a geolite database being loaded by the GeoIpHttpFixture. */ - if (getEndpoint() != null) { - EnterpriseGeoIpDownloader.DEFAULT_MAXMIND_ENDPOINT = getEndpoint(); - } + EnterpriseGeoIpDownloader.DEFAULT_MAXMIND_ENDPOINT = getEndpoint(); final String pipelineName = "enterprise_geoip_pipeline"; final String indexName = "enterprise_geoip_test_index"; final String sourceField = "ip"; diff --git a/test/fixtures/geoip-fixture/src/main/java/fixture/geoip/EnterpriseGeoIpHttpFixture.java b/test/fixtures/geoip-fixture/src/main/java/fixture/geoip/EnterpriseGeoIpHttpFixture.java index 9a5205f66d1f4..5932890dd8459 100644 --- a/test/fixtures/geoip-fixture/src/main/java/fixture/geoip/EnterpriseGeoIpHttpFixture.java +++ b/test/fixtures/geoip-fixture/src/main/java/fixture/geoip/EnterpriseGeoIpHttpFixture.java @@ -32,7 +32,6 @@ public class EnterpriseGeoIpHttpFixture extends ExternalResource { private final Path source; - private final boolean enabled; private final String[] databaseTypes; private HttpServer server; @@ -40,8 +39,7 @@ public class EnterpriseGeoIpHttpFixture extends ExternalResource { * The values in databaseTypes must be in DatabaseConfiguration.MAXMIND_NAMES, and must be one of the databases copied in the * copyFiles method of thisi class. */ - public EnterpriseGeoIpHttpFixture(boolean enabled, String... databaseTypes) { - this.enabled = enabled; + public EnterpriseGeoIpHttpFixture(String... 
databaseTypes) { this.databaseTypes = databaseTypes; try { this.source = Files.createTempDirectory("source"); @@ -56,28 +54,26 @@ public String getAddress() { @Override protected void before() throws Throwable { - if (enabled) { - copyFiles(); - this.server = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + copyFiles(); + this.server = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); - // for expediency reasons, it is handy to have this test fixture be able to serve the dual purpose of actually stubbing - // out the download protocol for downloading files from maxmind (see the looped context creation after this stanza), as - // we as to serve an empty response for the geoip.elastic.co service here - this.server.createContext("/", exchange -> { - String response = "[]"; // an empty json array - exchange.sendResponseHeaders(200, response.length()); - try (OutputStream os = exchange.getResponseBody()) { - os.write(response.getBytes(StandardCharsets.UTF_8)); - } - }); - - // register the file types for the download fixture - for (String databaseType : databaseTypes) { - createContextForEnterpriseDatabase(databaseType); + // for expediency reasons, it is handy to have this test fixture be able to serve the dual purpose of actually stubbing + // out the download protocol for downloading files from maxmind (see the looped context creation after this stanza), as + // we as to serve an empty response for the geoip.elastic.co service here + this.server.createContext("/", exchange -> { + String response = "[]"; // an empty json array + exchange.sendResponseHeaders(200, response.length()); + try (OutputStream os = exchange.getResponseBody()) { + os.write(response.getBytes(StandardCharsets.UTF_8)); } + }); - server.start(); + // register the file types for the download fixture + for (String databaseType : databaseTypes) { + createContextForEnterpriseDatabase(databaseType); } + + server.start(); } private void createContextForEnterpriseDatabase(String databaseType) { @@ -108,9 +104,7 @@ private void createContextForEnterpriseDatabase(String databaseType) { @Override protected void after() { - if (enabled) { - server.stop(0); - } + server.stop(0); } private void copyFiles() throws Exception { From a0d2d63068a55be9ad2cc1e363a3c62210327d8e Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 22 Jul 2024 13:12:02 -0700 Subject: [PATCH 099/406] Revert "Mute org.elasticsearch.multi_node.RollupIT org.elasticsearch.multi_node.RollupIT #111142" This reverts commit fffdcbe4 --- muted-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 492cc6ef71984..dc71ba3647290 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -87,8 +87,6 @@ tests: - class: org.elasticsearch.packaging.test.DockerTests method: test021InstallPlugin issue: https://github.com/elastic/elasticsearch/issues/110343 -- class: org.elasticsearch.multi_node.RollupIT - issue: https://github.com/elastic/elasticsearch/issues/111142 - class: org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT issue: https://github.com/elastic/elasticsearch/issues/111124 From 4b5a5100d110cf543ca3d48ebc8bbbdc0b4183ce Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 22 Jul 2024 16:22:48 -0400 Subject: [PATCH 100/406] [8.15] Backport #111117 and #111162 (#111170) * Allow runtime java to match adoptium jdks (#111117) * Use bundled jdk version for immutable collections patch (#111162) Co-authored-by: Ryan Ernst --- 
.../elasticsearch/gradle/internal/BwcSetupExtension.java | 7 ++----- .../gradle/internal/info/GlobalBuildInfoPlugin.java | 1 - .../toolchain/ArchivedOracleJdkToolchainResolver.java | 5 ++++- .../ArchivedOracleJdkToolchainResolverSpec.groovy | 6 ------ test/immutable-collections-patch/build.gradle | 2 +- 5 files changed, 7 insertions(+), 14 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java index 7010ed92d4c57..4112d96c7296b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java @@ -26,7 +26,6 @@ import org.gradle.api.tasks.TaskProvider; import org.gradle.jvm.toolchain.JavaLanguageVersion; import org.gradle.jvm.toolchain.JavaToolchainService; -import org.gradle.jvm.toolchain.JvmVendorSpec; import java.io.File; import java.io.IOException; @@ -161,10 +160,8 @@ private static TaskProvider createRunBwcGradleTask( /** A convenience method for getting java home for a version of java and requiring that version for the given task to execute */ private static Provider getJavaHome(ObjectFactory objectFactory, JavaToolchainService toolChainService, final int version) { Property value = objectFactory.property(JavaLanguageVersion.class).value(JavaLanguageVersion.of(version)); - return toolChainService.launcherFor(javaToolchainSpec -> { - javaToolchainSpec.getLanguageVersion().value(value); - javaToolchainSpec.getVendor().set(JvmVendorSpec.ORACLE); - }).map(launcher -> launcher.getMetadata().getInstallationPath().getAsFile().getAbsolutePath()); + return toolChainService.launcherFor(javaToolchainSpec -> { javaToolchainSpec.getLanguageVersion().value(value); }) + .map(launcher -> launcher.getMetadata().getInstallationPath().getAsFile().getAbsolutePath()); } private static String readFromFile(File file) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index b8ebb454ddb16..e61bbefc9a973 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -350,7 +350,6 @@ private File resolveJavaHomeFromToolChainService(String version) { Property value = objectFactory.property(JavaLanguageVersion.class).value(JavaLanguageVersion.of(version)); Provider javaLauncherProvider = toolChainService.launcherFor(javaToolchainSpec -> { javaToolchainSpec.getLanguageVersion().value(value); - javaToolchainSpec.getVendor().set(JvmVendorSpec.ORACLE); }); return javaLauncherProvider.get().getMetadata().getInstallationPath().getAsFile(); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolver.java index b8cffae0189ce..913a15517f0af 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolver.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolver.java @@ -23,9 +23,12 @@ import 
java.util.Map; import java.util.Optional; +/** + * Resolves released Oracle JDKs that are EOL. + */ public abstract class ArchivedOracleJdkToolchainResolver extends AbstractCustomJavaToolchainResolver { - private static final Map ARCHIVED_BASE_VERSIONS = Maps.of(20, "20.0.2", 19, "19.0.2", 18, "18.0.2.1", 17, "17.0.7"); + private static final Map ARCHIVED_BASE_VERSIONS = Maps.of(20, "20.0.2", 19, "19.0.2", 18, "18.0.2.1"); @Override public Optional resolve(JavaToolchainRequest request) { diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolverSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolverSpec.groovy index b7f08b6016679..dd6e7b324e745 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolverSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolverSpec.groovy @@ -40,12 +40,6 @@ class ArchivedOracleJdkToolchainResolverSpec extends AbstractToolchainResolverSp [18, ORACLE, LINUX, X86_64, "https://download.oracle.com/java/18/archive/jdk-18.0.2.1_linux-x64_bin.tar.gz"], [18, ORACLE, LINUX, AARCH64, "https://download.oracle.com/java/18/archive/jdk-18.0.2.1_linux-aarch64_bin.tar.gz"], [18, ORACLE, WINDOWS, X86_64, "https://download.oracle.com/java/18/archive/jdk-18.0.2.1_windows-x64_bin.zip"], - - [17, ORACLE, MAC_OS, X86_64, "https://download.oracle.com/java/17/archive/jdk-17.0.7_macos-x64_bin.tar.gz"], - [17, ORACLE, MAC_OS, AARCH64, "https://download.oracle.com/java/17/archive/jdk-17.0.7_macos-aarch64_bin.tar.gz"], - [17, ORACLE, LINUX, X86_64, "https://download.oracle.com/java/17/archive/jdk-17.0.7_linux-x64_bin.tar.gz"], - [17, ORACLE, LINUX, AARCH64, "https://download.oracle.com/java/17/archive/jdk-17.0.7_linux-aarch64_bin.tar.gz"], - [17, ORACLE, WINDOWS, X86_64, "https://download.oracle.com/java/17/archive/jdk-17.0.7_windows-x64_bin.zip"] ] } diff --git a/test/immutable-collections-patch/build.gradle b/test/immutable-collections-patch/build.gradle index 2d42215b3e02c..c3354e189847d 100644 --- a/test/immutable-collections-patch/build.gradle +++ b/test/immutable-collections-patch/build.gradle @@ -35,7 +35,7 @@ generatePatch.configure { executable = "${BuildParams.runtimeJavaHome}/bin/java" + (OS.current() == OS.WINDOWS ? '.exe' : '') } else { javaLauncher = javaToolchains.launcherFor { - languageVersion = JavaLanguageVersion.of(BuildParams.runtimeJavaVersion.majorVersion) + languageVersion = JavaLanguageVersion.of(VersionProperties.bundledJdkMajorVersion) vendor = VersionProperties.bundledJdkVendor == "openjdk" ? 
JvmVendorSpec.ORACLE : JvmVendorSpec.matching(VersionProperties.bundledJdkVendor) From 1cc031125607e38ad827fdacd7832e70256eb007 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 23 Jul 2024 09:51:05 +0100 Subject: [PATCH 101/406] [ML DOCS]Timeout only applies to ELSER and built in E5 models (#111159) (#111182) --- docs/reference/inference/put-inference.asciidoc | 8 -------- .../inference/service-elasticsearch.asciidoc | 14 +++++++++++--- docs/reference/inference/service-elser.asciidoc | 14 +++++++++++--- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index b809a96b8f81a..948496c473a20 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -43,11 +43,3 @@ The following services are available through the {infer} API, click the links to * <> * <> * <> - -[NOTE] -==== -You might see a 502 bad gateway error in the response when using the {kib} Console. -This error usually just reflects a timeout, while the model downloads in the background. -You can check the download progress in the {ml-app} UI. -If using the Python client, you can set the `timeout` parameter to a higher value. -==== \ No newline at end of file diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index 3b9b5b1928d7b..50b97b3506ee8 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -35,7 +35,7 @@ Available task types: `service`:: (Required, string) -The type of service supported for the specified task type. In this case, +The type of service supported for the specified task type. In this case, `elasticsearch`. `service_settings`:: @@ -58,7 +58,7 @@ The total number of allocations this model is assigned across machine learning n `num_threads`::: (Required, integer) -Sets the number of threads used by each model allocation during inference. This generally increases the speed per inference request. The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. +Sets the number of threads used by each model allocation during inference. This generally increases the speed per inference request. The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. Must be a power of 2. Max allowed value is 32. `task_settings`:: @@ -98,6 +98,14 @@ PUT _inference/text_embedding/my-e5-model Valid values are `.multilingual-e5-small` and `.multilingual-e5-small_linux-x86_64`. For further details, refer to the {ml-docs}/ml-nlp-e5.html[E5 model documentation]. +[NOTE] +==== +You might see a 502 bad gateway error in the response when using the {kib} Console. +This error usually just reflects a timeout, while the model downloads in the background. +You can check the download progress in the {ml-app} UI. +If using the Python client, you can set the `timeout` parameter to a higher value. 
+==== + [discrete] [[inference-example-eland]] ==== Models uploaded by Eland via the elasticsearch service @@ -119,4 +127,4 @@ PUT _inference/text_embedding/my-msmarco-minilm-model ------------------------------------------------------------ // TEST[skip:TBD] <1> The `model_id` must be the ID of a text embedding model which has already been -{ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. \ No newline at end of file +{ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 829ff4968c5be..dff531f2a414b 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -34,7 +34,7 @@ Available task types: `service`:: (Required, string) -The type of service supported for the specified task type. In this case, +The type of service supported for the specified task type. In this case, `elser`. `service_settings`:: @@ -51,7 +51,7 @@ The total number of allocations this model is assigned across machine learning n `num_threads`::: (Required, integer) -Sets the number of threads used by each model allocation during inference. This generally increases the speed per inference request. The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. +Sets the number of threads used by each model allocation during inference. This generally increases the speed per inference request. The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. Must be a power of 2. Max allowed value is 32. @@ -92,4 +92,12 @@ Example response: "task_settings": {} } ------------------------------------------------------------ -// NOTCONSOLE \ No newline at end of file +// NOTCONSOLE + +[NOTE] +==== +You might see a 502 bad gateway error in the response when using the {kib} Console. +This error usually just reflects a timeout, while the model downloads in the background. +You can check the download progress in the {ml-app} UI. +If using the Python client, you can set the `timeout` parameter to a higher value. +==== From 0f1fd0d5fdc7d10aee95de7b7ba3da7d1e0c0d1d Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Tue, 23 Jul 2024 12:25:07 +0200 Subject: [PATCH 102/406] Simple addition of ES|QL to geo overview page (#111158) (#111185) --- docs/reference/geospatial-analysis.asciidoc | 24 ++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/docs/reference/geospatial-analysis.asciidoc b/docs/reference/geospatial-analysis.asciidoc index 7577bb222127f..6760040e14bc7 100644 --- a/docs/reference/geospatial-analysis.asciidoc +++ b/docs/reference/geospatial-analysis.asciidoc @@ -2,7 +2,7 @@ [[geospatial-analysis]] = Geospatial analysis -Did you know that {es} has geospatial capabilities? https://www.elastic.co/blog/geo-location-and-search[{es} and geo] go way back, to 2010. A lot has happened since then and today {es} provides robust geospatial capabilities with speed, all with a stack that scales automatically. +Did you know that {es} has geospatial capabilities? https://www.elastic.co/blog/geo-location-and-search[{es} and geo] go way back, to 2010. A lot has happened since then and today {es} provides robust geospatial capabilities with speed, all with a stack that scales automatically. 
Not sure where to get started with {es} and geo? Then, you have come to the right place. @@ -18,8 +18,10 @@ Have an index with lat/lon pairs but no geo_point mapping? Use <> lets you clean, transform, and augment your data before indexing. +Data is often messy and incomplete. <> lets you clean, transform, and augment your data before indexing. +* Use <> together with <> to index CSV files with geo data. + Kibana's {kibana-ref}/import-geospatial-data.html[Import CSV] feature can help with this. * Use <> to add geographical location of an IPv4 or IPv6 address. * Use <> to convert grid tiles or hexagonal cell ids to bounding boxes or polygons which describe their shape. * Use <> for reverse geocoding. For example, use {kibana-ref}/reverse-geocoding-tutorial.html[reverse geocoding] to visualize metropolitan areas by web traffic. @@ -30,6 +32,18 @@ Data is often messy and incomplete. <> lets you clean, <> answer location-driven questions. Find documents that intersect with, are within, are contained by, or do not intersect your query geometry. Combine geospatial queries with full text search queries for unparalleled searching experience. For example, "Show me all subscribers that live within 5 miles of our new gym location, that joined in the last year and have running mentioned in their profile". +[discrete] +[[esql-query]] +=== ES|QL + +<> has support for <> functions, enabling efficient index searching for documents that intersect with, are within, are contained by, or are disjoint from a query geometry. In addition, the `ST_DISTANCE` function calculates the distance between two points. + +* experimental:[] <> +* experimental:[] <> +* experimental:[] <> +* experimental:[] <> +* experimental:[] <> + [discrete] [[geospatial-aggregate]] === Aggregate @@ -42,12 +56,12 @@ Geospatial bucket aggregations: * <> groups geo_point and geo_shape values into buckets that represent a grid. * <> groups geo_point and geo_shape values into buckets that represent an H3 hexagonal cell. * <> groups geo_point and geo_shape values into buckets that represent a grid. Each cell corresponds to a {wikipedia}/Tiled_web_map[map tile] as used by many online map sites. - + Geospatial metric aggregations: * <> computes the geographic bounding box containing all values for a Geopoint or Geoshape field. * <> computes the weighted centroid from all coordinate values for geo fields. -* <> aggregates all geo_point values within a bucket into a LineString ordered by the chosen sort field. Use geo_line aggregation to create {kibana-ref}/asset-tracking-tutorial.html[vehicle tracks]. +* <> aggregates all geo_point values within a bucket into a LineString ordered by the chosen sort field. Use geo_line aggregation to create {kibana-ref}/asset-tracking-tutorial.html[vehicle tracks]. Combine aggregations to perform complex geospatial analysis. For example, to calculate the most recent GPS tracks per flight, use a <> to group documents into buckets per aircraft. Then use geo-line aggregation to compute a track for each aircraft. In another example, use geotile grid aggregation to group documents into a grid. Then use geo-centroid aggregation to find the weighted centroid of each grid cell. @@ -79,4 +93,4 @@ Put machine learning to work for you and find the data that should stand out wit Let your location data drive insights and action with {kibana-ref}/geo-alerting.html[geographic alerts]. 
Commonly referred to as geo-fencing, track moving objects as they enter or exit a boundary to receive notifications through common business systems (email, Slack, Teams, PagerDuty, and more). -Interested in learning more? Follow {kibana-ref}/asset-tracking-tutorial.html[step-by-step instructions] for setting up tracking containment alerts to monitor moving vehicles. \ No newline at end of file +Interested in learning more? Follow {kibana-ref}/asset-tracking-tutorial.html[step-by-step instructions] for setting up tracking containment alerts to monitor moving vehicles. From ecfe7384bb54dd42964d707fe4ac97ec29ec3712 Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Tue, 23 Jul 2024 08:51:27 -0400 Subject: [PATCH 103/406] docs for named and positional parameters (#111178) (#111190) --- docs/reference/esql/esql-rest.asciidoc | 41 ++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/docs/reference/esql/esql-rest.asciidoc b/docs/reference/esql/esql-rest.asciidoc index 5b90e96d7a734..2c8c5e81e273d 100644 --- a/docs/reference/esql/esql-rest.asciidoc +++ b/docs/reference/esql/esql-rest.asciidoc @@ -278,6 +278,47 @@ POST /_query ---- // TEST[setup:library] +The parameters can be named parameters or positional parameters. + +Named parameters use question mark placeholders (`?`) followed by a string. + +[source,console] +---- +POST /_query +{ + "query": """ + FROM library + | EVAL year = DATE_EXTRACT("year", release_date) + | WHERE page_count > ?page_count AND author == ?author + | STATS count = COUNT(*) by year + | WHERE count > ?count + | LIMIT 5 + """, + "params": [{"page_count" : 300}, {"author" : "Frank Herbert"}, {"count" : 0}] +} +---- +// TEST[setup:library] + +Positional parameters use question mark placeholders (`?`) followed by an +integer. 
+ +[source,console] +---- +POST /_query +{ + "query": """ + FROM library + | EVAL year = DATE_EXTRACT("year", release_date) + | WHERE page_count > ?1 AND author == ?2 + | STATS count = COUNT(*) by year + | WHERE count > ?3 + | LIMIT 5 + """, + "params": [300, "Frank Herbert", 0] +} +---- +// TEST[setup:library] + [discrete] [[esql-rest-async-query]] ==== Running an async {esql} query From 0e6673f7ab45f3eaea29769300418b60e204ad48 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Tue, 23 Jul 2024 15:21:17 +0200 Subject: [PATCH 104/406] ES|QL: reduce max expression depth to 400 (#111186) (#111189) --- docs/changelog/111186.yaml | 6 ++++++ .../xpack/esql/parser/ExpressionBuilder.java | 17 +++++++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/111186.yaml diff --git a/docs/changelog/111186.yaml b/docs/changelog/111186.yaml new file mode 100644 index 0000000000000..3676beb3910c5 --- /dev/null +++ b/docs/changelog/111186.yaml @@ -0,0 +1,6 @@ +pr: 111186 +summary: "ES|QL: reduce max expression depth to 400" +area: ES|QL +type: bug +issues: + - 109846 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java index 9769d286b484d..88279b65d2007 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java @@ -89,9 +89,22 @@ public abstract class ExpressionBuilder extends IdentifierBuilder { private int expressionDepth = 0; /** - * Maximum depth for nested expressions + * Maximum depth for nested expressions. + * Avoids StackOverflowErrors at parse time with very convoluted expressions, + * eg. EVAL x = sin(sin(sin(sin(sin(sin(sin(sin(sin(....sin(x)....) + * ANTLR parser is recursive, so the only way to prevent a StackOverflow is to detect how + * deep we are in the expression parsing and abort the query execution after a threshold + * + * This value is defined empirically, but the actual stack limit is highly + * dependent on the JVM and on the JIT. + * + * A value of 500 proved to be right below the stack limit, but it still triggered + * some CI failures (once every ~2000 iterations). see https://github.com/elastic/elasticsearch/issues/109846 + * Even though we didn't manage to reproduce the problem in real conditions, we decided + * to reduce the max allowed depth to 400 (that is still a pretty reasonable limit for real use cases) and be more safe. 
+ * */ - public static final int MAX_EXPRESSION_DEPTH = 500; + public static final int MAX_EXPRESSION_DEPTH = 400; protected final QueryParams params; From c9af95cc9eb2bf27ce98047cd4a17f036cc2add2 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Tue, 23 Jul 2024 17:54:04 +0200 Subject: [PATCH 105/406] Fix Dissect with leading non-ascii characters (#111184) (#111196) Co-authored-by: Elastic Machine --- docs/changelog/111184.yaml | 5 +++++ .../org/elasticsearch/dissect/DissectParser.java | 2 +- .../elasticsearch/dissect/DissectParserTests.java | 12 ++++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/111184.yaml diff --git a/docs/changelog/111184.yaml b/docs/changelog/111184.yaml new file mode 100644 index 0000000000000..5ecdba54b09be --- /dev/null +++ b/docs/changelog/111184.yaml @@ -0,0 +1,5 @@ +pr: 111184 +summary: Fix Dissect with leading non-ascii characters +area: Ingest Node +type: bug +issues: [] diff --git a/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java b/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java index f3f53f1b3c5ea..3c01e490369de 100644 --- a/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java +++ b/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java @@ -203,7 +203,7 @@ public Map parse(String inputString) { DissectKey key = dissectPair.key(); byte[] delimiter = dissectPair.delimiter().getBytes(StandardCharsets.UTF_8); // start dissection after the first delimiter - int i = leadingDelimiter.length(); + int i = leadingDelimiter.getBytes(StandardCharsets.UTF_8).length; int valueStart = i; int lookAheadMatches; // start walking the input string byte by byte, look ahead for matches where needed diff --git a/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java b/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java index 431b26fc1155d..2893e419a84a3 100644 --- a/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java +++ b/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java @@ -211,6 +211,18 @@ public void testMatchUnicode() { assertMatch("%{a->}࿏%{b}", "⟳༒࿏࿏࿏࿏࿏༒⟲", Arrays.asList("a", "b"), Arrays.asList("⟳༒", "༒⟲")); assertMatch("%{*a}࿏%{&a}", "⟳༒࿏༒⟲", Arrays.asList("⟳༒"), Arrays.asList("༒⟲")); assertMatch("%{}࿏%{a}", "⟳༒࿏༒⟲", Arrays.asList("a"), Arrays.asList("༒⟲")); + assertMatch( + "Zürich, the %{adjective} city in Switzerland", + "Zürich, the largest city in Switzerland", + Arrays.asList("adjective"), + Arrays.asList("largest") + ); + assertMatch( + "Zürich, the %{one} city in Switzerland; Zürich, the %{two} city in Switzerland", + "Zürich, the largest city in Switzerland; Zürich, the LARGEST city in Switzerland", + Arrays.asList("one", "two"), + Arrays.asList("largest", "LARGEST") + ); } public void testMatchRemainder() { From 00cfa6833607b6ea8e7b1661761afe30858e7c15 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 23 Jul 2024 12:33:27 -0700 Subject: [PATCH 106/406] Make docker packaging test more resilient (#111205) (#111210) Wrap check for container shutdown log message in an `assertBusy()` to deal with race conditions. 
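The retry idiom used in the fix can be shown in isolation. The sketch below is a self-contained approximation of the polling behaviour provided by `ESTestCase#assertBusy` (re-run the assertion with a growing sleep until it passes or a timeout elapses); it is not the Elasticsearch implementation, and the ~200ms delay and class/helper names are only placeholders standing in for a container that needs a moment to flush its shutdown message to the logs.

[source,java]
----
import java.util.concurrent.TimeUnit;

// Standalone approximation of the "retry until the assertion holds or a
// timeout elapses" pattern; not the ESTestCase implementation.
public final class BusyAssertSketch {

    static void assertBusy(Runnable assertion, long timeout, TimeUnit unit) throws InterruptedException {
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        long sleepMillis = 10;
        while (true) {
            try {
                assertion.run();
                return; // the assertion finally passed
            } catch (AssertionError e) {
                if (System.nanoTime() > deadline) {
                    throw e; // timed out: surface the last failure
                }
                Thread.sleep(sleepMillis);
                sleepMillis = Math.min(sleepMillis * 2, 1_000); // simple backoff
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        // Mimics a container that only exposes the shutdown message in its logs
        // after a short delay: the assertion starts passing after ~200ms.
        assertBusy(() -> {
            if (System.currentTimeMillis() - start < 200) {
                throw new AssertionError("log line not visible yet");
            }
        }, 5, TimeUnit.SECONDS);
        System.out.println("assertion eventually passed");
    }
}
----

Trading a bounded wait for determinism is the same design choice applied to the configuration test in a later commit of this series.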
Closes #111132 # Conflicts: # muted-tests.yml --- .../elasticsearch/packaging/test/DockerTests.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index 6c4f3b354c73e..18668b842b2d3 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -1231,7 +1231,7 @@ public void test500Readiness() throws Exception { assertBusy(() -> assertTrue(readinessProbe(9399))); } - public void test600Interrupt() { + public void test600Interrupt() throws Exception { waitForElasticsearch(installation, "elastic", PASSWORD); final Result containerLogs = getContainerLogs(); @@ -1241,10 +1241,12 @@ public void test600Interrupt() { final int maxPid = infos.stream().map(i -> i.pid()).max(Integer::compareTo).get(); sh.run("bash -c 'kill -int " + maxPid + "'"); // send ctrl+c to all java processes - final Result containerLogsAfter = getContainerLogs(); - assertThat("Container logs should contain stopping ...", containerLogsAfter.stdout(), containsString("stopping ...")); - assertThat("No errors stdout", containerLogsAfter.stdout(), not(containsString("java.security.AccessControlException:"))); - assertThat("No errors stderr", containerLogsAfter.stderr(), not(containsString("java.security.AccessControlException:"))); + assertBusy(() -> { + final Result containerLogsAfter = getContainerLogs(); + assertThat("Container logs should contain stopping ...", containerLogsAfter.stdout(), containsString("stopping ...")); + assertThat("No errors stdout", containerLogsAfter.stdout(), not(containsString("java.security.AccessControlException:"))); + assertThat("No errors stderr", containerLogsAfter.stderr(), not(containsString("java.security.AccessControlException:"))); + }); } } From b521b48bf0c808d45a294334f1656ea73bc974e3 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 24 Jul 2024 09:48:31 +1000 Subject: [PATCH 107/406] Mute org.elasticsearch.packaging.test.DockerTests test600Interrupt #111132 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index dc71ba3647290..25459c3a29e61 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -89,6 +89,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/110343 - class: org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT issue: https://github.com/elastic/elasticsearch/issues/111124 +- class: org.elasticsearch.packaging.test.DockerTests + method: test600Interrupt + issue: https://github.com/elastic/elasticsearch/issues/111132 # Examples: # From 8e6507b950c9a186b5917e8d63dee990f26a6fa4 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 23 Jul 2024 17:13:01 -0700 Subject: [PATCH 108/406] Make ConfigurationTests.test20HostnameSubstitution more resilient (#111216) (#111218) This is an attempt to fix occasional test failures where asserting on a request response fails because the cluster has not finished initialization and cannot yet serve requests. 
Closes #109660 --- .../packaging/test/ConfigurationTests.java | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java index 1925b1e8f36ab..2ce9eef29d903 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java @@ -20,7 +20,6 @@ import static java.nio.file.attribute.PosixFilePermissions.fromString; import static org.elasticsearch.packaging.util.FileUtils.append; -import static org.hamcrest.Matchers.equalTo; import static org.junit.Assume.assumeFalse; public class ConfigurationTests extends PackagingTestCase { @@ -50,13 +49,15 @@ public void test20HostnameSubstitution() throws Exception { // security auto-config requires that the archive owner and the node process user be the same Platforms.onWindows(() -> sh.chown(confPath, installation.getOwner())); assertWhileRunning(() -> { - final String nameResponse = ServerUtils.makeRequest( - Request.Get("https://localhost:9200/_cat/nodes?h=name"), - "test_superuser", - "test_superuser_password", - ServerUtils.getCaCert(confPath) - ).strip(); - assertThat(nameResponse, equalTo("mytesthost")); + assertBusy(() -> { + final String nameResponse = ServerUtils.makeRequest( + Request.Get("https://localhost:9200/_cat/nodes?h=name"), + "test_superuser", + "test_superuser_password", + ServerUtils.getCaCert(confPath) + ).strip(); + assertEquals("mytesthost", nameResponse); + }); }); Platforms.onWindows(() -> sh.chown(confPath)); }); From 17c2161a1ab5569c510481644a1a3e28152b4eab Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Wed, 24 Jul 2024 12:39:13 +0200 Subject: [PATCH 109/406] ESQL: Fix variable shadowing when pushing down past Project (#108360) (#111229) Fix bugs caused by pushing down Eval, Grok, Dissect and Enrich past Rename, where after the pushdown, the columns added shadowed the columns to be renamed. For Dissect and Grok, this enables naming their generated attributes to deviate from the names obtained from the dissect/grok patterns. 
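The shadowing problem and the temporary-name workaround can be illustrated outside the optimizer with a toy column model. In the sketch below rows are plain LinkedHashMaps and `rename`/`eval` are stand-ins for the Project and Eval plan nodes; it only approximates the behaviour described above and is not Elasticsearch code, and the `$$y` temporary name merely mirrors the `$$`-prefixed synthetic names used in the patch.

[source,java]
----
import java.util.LinkedHashMap;
import java.util.Map;

// Toy model: a row is an ordered map of named columns; later columns shadow
// earlier ones of the same name, mirroring how ES|QL merges output attributes.
public final class PushdownShadowingSketch {

    // RENAME from AS to: drop the old column and re-add its value under the new name.
    static Map<String, Integer> rename(Map<String, Integer> row, String from, String to) {
        Map<String, Integer> out = new LinkedHashMap<>(row);
        Integer value = out.remove(from);
        out.put(to, value);
        return out;
    }

    // EVAL name = value: appended on the right, shadowing any earlier column of the same name.
    static Map<String, Integer> eval(Map<String, Integer> row, String name, int value) {
        Map<String, Integer> out = new LinkedHashMap<>(row);
        out.remove(name);
        out.put(name, value);
        return out;
    }

    public static void main(String[] args) {
        Map<String, Integer> input = new LinkedHashMap<>();
        input.put("x", 1);
        input.put("y", 2);

        // Original order: ... | RENAME y AS z | EVAL y = x + 10
        Map<String, Integer> expected = eval(rename(input, "y", "z"), "y", input.get("x") + 10);

        // Naive pushdown (EVAL before RENAME): the freshly generated y shadows the
        // original y, so the rename picks up the wrong value and y is lost.
        Map<String, Integer> broken = rename(eval(input, "y", input.get("x") + 10), "y", "z");

        // Workaround: generate into a temporary name, then rename it back on top.
        Map<String, Integer> fixed = rename(rename(eval(input, "$$y", input.get("x") + 10), "y", "z"), "$$y", "y");

        System.out.println("expected = " + expected); // {x=1, z=2, y=11}
        System.out.println("broken   = " + broken);   // {x=1, z=11}
        System.out.println("fixed    = " + fixed);    // {x=1, z=2, y=11}
    }
}
----

Running the sketch shows the naive pushdown yielding `{x=1, z=11}` instead of `{x=1, z=2, y=11}`, which is the class of failure the csv-spec cases added in this patch guard against.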
(cherry picked from commit e8a01bbd9c41ed77239a7b41761d1e58994a034f) # Conflicts: # x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java # x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java # x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java # x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java # x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java --- docs/changelog/108360.yaml | 6 + .../xpack/esql/EsqlTestUtils.java | 5 + .../src/main/resources/dissect.csv-spec | 33 +++ .../src/main/resources/enrich.csv-spec | 36 +++ .../src/main/resources/eval.csv-spec | 39 ++++ .../src/main/resources/grok.csv-spec | 33 +++ .../src/main/resources/stats.csv-spec | 59 +++++ .../xpack/esql/action/EsqlCapabilities.java | 8 +- .../xpack/esql/analysis/Analyzer.java | 4 +- .../esql/expression/NamedExpressions.java | 3 +- .../esql/optimizer/LogicalPlanOptimizer.java | 179 +++++++++++--- .../xpack/esql/optimizer/OptimizerRules.java | 13 +- .../esql/optimizer/rules/PushDownEnrich.java | 4 +- .../esql/optimizer/rules/PushDownEval.java | 4 +- .../optimizer/rules/PushDownRegexExtract.java | 2 +- .../ReplaceOrderByExpressionWithEval.java | 2 +- .../ReplaceStatsAggExpressionWithEval.java | 3 +- .../ReplaceStatsNestedExpressionWithEval.java | 3 +- .../optimizer/rules/SubstituteSurrogates.java | 28 +-- .../xpack/esql/parser/LogicalPlanBuilder.java | 16 +- .../xpack/esql/plan/GeneratingPlan.java | 40 ++++ .../xpack/esql/plan/logical/Dissect.java | 19 ++ .../xpack/esql/plan/logical/Enrich.java | 35 ++- .../xpack/esql/plan/logical/Eval.java | 52 ++++- .../xpack/esql/plan/logical/Grok.java | 5 + .../xpack/esql/plan/logical/RegexExtract.java | 31 ++- .../esql/planner/LocalExecutionPlanner.java | 20 +- .../optimizer/LogicalPlanOptimizerTests.java | 219 +++++++++++++++++- .../parser/AbstractStatementParserTests.java | 5 - .../esql/parser/StatementParserTests.java | 1 + 30 files changed, 795 insertions(+), 112 deletions(-) create mode 100644 docs/changelog/108360.yaml create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/GeneratingPlan.java diff --git a/docs/changelog/108360.yaml b/docs/changelog/108360.yaml new file mode 100644 index 0000000000000..087dd2649c6aa --- /dev/null +++ b/docs/changelog/108360.yaml @@ -0,0 +1,6 @@ +pr: 108360 +summary: "ESQL: Fix variable shadowing when pushing down past Project" +area: ES|QL +type: bug +issues: + - 108008 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java index d7e067658267f..68696ad5ac99a 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.predicate.Range; import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; @@ -169,6 +170,10 @@ public static Literal 
of(Source source, Object value) { return new Literal(source, value, DataType.fromJava(value)); } + public static ReferenceAttribute referenceAttribute(String name, DataType type) { + return new ReferenceAttribute(EMPTY, name, type); + } + public static Range rangeOf(Expression value, Expression lower, boolean includeLower, Expression upper, boolean includeUpper) { return new Range(EMPTY, value, lower, includeLower, upper, includeUpper, randomZone()); } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec index 8c4e797b7982d..38f09d2e3c56e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec @@ -75,6 +75,39 @@ first_name:keyword | last_name:keyword | name:keyword | foo:keyword Georgi | Facello | Georgi1 Facello | Facello ; +shadowingWhenPushedDownPastRename +required_capability: fixed_pushdown_past_project +ROW city = "Zürich", long_city_name = "Zurich, the largest city in Switzerland" +| RENAME city AS c +| DISSECT long_city_name "Zurich, the %{city} city in Switzerland" +; + +c:keyword | long_city_name:keyword | city:keyword +Zürich | Zurich, the largest city in Switzerland | largest +; + +shadowingWhenPushedDownPastRename2 +required_capability: fixed_pushdown_past_project +ROW city = "Zürich", long_city_name = "Zurich, the largest city in Switzerland" +| RENAME city AS c +| DISSECT long_city_name "Zurich, the %{city} city in %{foo}" +; + +c:keyword | long_city_name:keyword | city:keyword | foo:keyword +Zürich | Zurich, the largest city in Switzerland | largest | Switzerland +; + +shadowingWhenPushedDownPastRename3 +required_capability: fixed_pushdown_past_project +ROW city = "Zürich", long_city_name = "Zurich, the largest city in Switzerland" +| RENAME long_city_name AS c +| DISSECT c "Zurich, the %{long_city_name} city in Switzerland" +; + +city:keyword | c:keyword | long_city_name:keyword +Zürich | Zurich, the largest city in Switzerland | largest +; + complexPattern ROW a = "1953-01-23T12:15:00Z - some text - 127.0.0.1;" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec index ab2ddb84ed969..925c08f317125 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec @@ -174,6 +174,42 @@ city:keyword | airport:text Zürich | Zurich Int'l ; +shadowingWhenPushedDownPastRename +required_capability: enrich_load +required_capability: fixed_pushdown_past_project +ROW city = "Zürich", airport = "ZRH" +| RENAME airport AS a +| ENRICH city_names ON city WITH airport +; + +city:keyword | a:keyword | airport:text +Zürich | ZRH | Zurich Int'l +; + +shadowingWhenPushedDownPastRename2 +required_capability: enrich_load +required_capability: fixed_pushdown_past_project +ROW city = "Zürich", airport = "ZRH" +| RENAME airport AS a +| ENRICH city_names ON city WITH airport, region +; + +city:keyword | a:keyword | airport:text | region:text +Zürich | ZRH | Zurich Int'l | Bezirk Zürich +; + +shadowingWhenPushedDownPastRename3 +required_capability: enrich_load +required_capability: fixed_pushdown_past_project +ROW city = "Zürich", airport = "ZRH" +| RENAME city as c +| ENRICH city_names ON c WITH city = airport +; + +c:keyword | airport:keyword | city:text +Zürich | ZRH | Zurich Int'l +; + simple 
required_capability: enrich_load diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index 770358e5120da..61a0ccd4af0c5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -55,6 +55,45 @@ x:integer 9999 ; +shadowingWhenPushedDownPastRename +required_capability: fixed_pushdown_past_project +FROM employees +| WHERE emp_no < 10002 +| KEEP emp_no, languages +| RENAME emp_no AS z +| EVAL emp_no = 3 +; + +z:integer | languages:integer | emp_no:integer + 10001 | 2 | 3 +; + +shadowingWhenPushedDownPastRename2 +required_capability: fixed_pushdown_past_project +FROM employees +| WHERE emp_no < 10002 +| KEEP emp_no, languages +| RENAME emp_no AS z +| EVAL emp_no = z + 1, emp_no = emp_no + languages, a = 0, languages = -1 +; + +z:integer | emp_no:integer | a:integer | languages:integer + 10001 | 10004 | 0 | -1 +; + +shadowingWhenPushedDownPastRename3 +required_capability: fixed_pushdown_past_project +FROM employees +| WHERE emp_no < 10002 +| KEEP emp_no, languages +| RENAME emp_no AS z +| EVAL emp_no = z + 1 +; + +z:integer | languages:integer | emp_no:integer + 10001 | 2 | 10002 +; + withMath row a = 1 | eval b = 2 + 3; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec index d9857e8c122ef..98c88d06caa75 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec @@ -76,6 +76,39 @@ San Francisco | CA 94108 | ["CA", "94108"] Tokyo | 100-7014 | null ; +shadowingWhenPushedDownPastRename +required_capability: fixed_pushdown_past_project +ROW city = "Zürich", long_city_name = "Zürich, the largest city in Switzerland" +| RENAME city AS c +| GROK long_city_name "Zürich, the %{WORD:city} %{WORD:city} %{WORD:city} %{WORD:city}" +; + +c:keyword | long_city_name:keyword | city:keyword +Zürich | Zürich, the largest city in Switzerland | ["largest", "city", "in", "Switzerland"] +; + +shadowingWhenPushedDownPastRename2 +required_capability: fixed_pushdown_past_project +ROW city = "Zürich", long_city_name = "Zürich, the largest city in Switzerland" +| RENAME city AS c +| GROK long_city_name "Zürich, the %{WORD:city} %{WORD:foo} %{WORD:city} %{WORD:foo}" +; + +c:keyword | long_city_name:keyword | city:keyword | foo:keyword +Zürich | Zürich, the largest city in Switzerland | ["largest", "in"] | ["city", "Switzerland"] +; + +shadowingWhenPushedDownPastRename3 +required_capability: fixed_pushdown_past_project +ROW city = "Zürich", long_city_name = "Zürich, the largest city in Switzerland" +| RENAME long_city_name AS c +| GROK c "Zürich, the %{WORD:long_city_name} %{WORD:long_city_name} %{WORD:long_city_name} %{WORD:long_city_name}" +; + +city:keyword | c:keyword | long_city_name:keyword +Zürich | Zürich, the largest city in Switzerland | ["largest", "city", "in", "Switzerland"] +; + complexPattern ROW a = "1953-01-23T12:15:00Z 127.0.0.1 some.email@foo.com 42" | GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index b2080b54b981c..be4342b95c6b8 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -575,6 +575,65 @@ ca:l | cx:l | l:i 1 | 1 | null ; +/////////////////////////////////////////////////////////////// +// Test edge case interaction with push down past a rename +// https://github.com/elastic/elasticsearch/issues/108008 +/////////////////////////////////////////////////////////////// + +countSameFieldWithEval +required_capability: fixed_pushdown_past_project +from employees | stats b = count(gender), c = count(gender) by gender | eval b = gender | sort c asc +; + +c:l | gender:s | b:s +0 | null | null +33 | F | F +57 | M | M +; + +countSameFieldWithDissect +required_capability: fixed_pushdown_past_project +from employees | stats b = count(gender), c = count(gender) by gender | dissect gender "%{b}" | sort c asc +; + +c:l | gender:s | b:s +0 | null | null +33 | F | F +57 | M | M +; + +countSameFieldWithGrok +required_capability: fixed_pushdown_past_project +from employees | stats b = count(gender), c = count(gender) by gender | grok gender "%{USERNAME:b}" | sort c asc +; + +c:l | gender:s | b:s +0 | null | null +33 | F | F +57 | M | M +; + +countSameFieldWithEnrich +required_capability: fixed_pushdown_past_project +required_capability: enrich_load +from employees | stats b = count(gender), c = count(gender) by gender | enrich languages_policy on gender with b = language_name | sort c asc +; + +c:l | gender:s | b:s +0 | null | null +33 | F | null +57 | M | null +; + +countSameFieldWithEnrichLimit0 +required_capability: fixed_pushdown_past_project +from employees | stats b = count(gender), c = count(gender) by gender | enrich languages_policy on gender with b = language_name | sort c asc | limit 0 +; + +c:l | gender:s | b:s +; +/////////////////////////////////////////////////////////////// + aggsWithoutStats from employees | stats by gender | sort gender; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 5641f49b039f6..918e9614f9070 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -139,7 +139,13 @@ public enum Cap { * Fix for non-unique attribute names in ROW and logical plans. * https://github.com/elastic/elasticsearch/issues/110541 */ - UNIQUE_NAMES; + UNIQUE_NAMES, + + /** + * Make attributes of GROK/DISSECT adjustable and fix a shadowing bug when pushing them down past PROJECT. 
+ * https://github.com/elastic/elasticsearch/issues/108008 + */ + FIXED_PUSHDOWN_PAST_PROJECT; private final boolean snapshotOnly; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index b41156824be12..37c8cceb3f605 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -65,7 +65,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.DateTimeArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; -import org.elasticsearch.xpack.esql.optimizer.rules.SubstituteSurrogates; +import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Drop; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -1193,7 +1193,7 @@ private Expression createIfDoesNotAlreadyExist( List unionFieldAttributes ) { // Generate new ID for the field and suffix it with the data type to maintain unique attribute names. - String unionTypedFieldName = SubstituteSurrogates.rawTemporaryName( + String unionTypedFieldName = LogicalPlanOptimizer.rawTemporaryName( fa.name(), "converted_to", resolvedField.getDataType().typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/NamedExpressions.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/NamedExpressions.java index d0c8adfd3c858..624ea9a030208 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/NamedExpressions.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/NamedExpressions.java @@ -33,7 +33,8 @@ public static List mergeOutputAttributes( /** * Merges output expressions of a command given the new attributes plus the existing inputs that are emitted as outputs. * As a general rule, child output will come first in the list, followed by the new fields. - * In case of name collisions, only last entry is preserved (previous expressions with the same name are discarded) + * In case of name collisions, only the last entry is preserved (previous expressions with the same name are discarded) + * and the new attributes have precedence over the child output. 
* @param fields the fields added by the command * @param childOutput the command input that has to be propagated as output * @return diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index ca4b5d17deed3..439289c879c27 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -15,6 +15,9 @@ import org.elasticsearch.xpack.esql.core.expression.AttributeMap; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.NameId; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.Order; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; @@ -22,6 +25,7 @@ import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.optimizer.rules.AddDefaultTopN; import org.elasticsearch.xpack.esql.optimizer.rules.BooleanFunctionEqualsElimination; import org.elasticsearch.xpack.esql.optimizer.rules.BooleanSimplification; @@ -67,6 +71,7 @@ import org.elasticsearch.xpack.esql.optimizer.rules.SubstituteSpatialSurrogates; import org.elasticsearch.xpack.esql.optimizer.rules.SubstituteSurrogates; import org.elasticsearch.xpack.esql.optimizer.rules.TranslateMetricsAggregate; +import org.elasticsearch.xpack.esql.plan.GeneratingPlan; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; @@ -74,8 +79,11 @@ import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; +import java.util.Map; import java.util.Set; import static java.util.Arrays.asList; @@ -89,6 +97,34 @@ public LogicalPlanOptimizer(LogicalOptimizerContext optimizerContext) { super(optimizerContext); } + public static String temporaryName(Expression inner, Expression outer, int suffix) { + String in = toString(inner); + String out = toString(outer); + return rawTemporaryName(in, out, String.valueOf(suffix)); + } + + public static String locallyUniqueTemporaryName(String inner, String outer) { + return FieldAttribute.SYNTHETIC_ATTRIBUTE_NAME_PREFIX + inner + "$" + outer + "$" + new NameId(); + } + + public static String rawTemporaryName(String inner, String outer, String suffix) { + return FieldAttribute.SYNTHETIC_ATTRIBUTE_NAME_PREFIX + inner + "$" + outer + "$" + suffix; + } + + static String toString(Expression ex) { + return ex instanceof AggregateFunction af ? af.functionName() : extractString(ex); + } + + static String extractString(Expression ex) { + return ex instanceof NamedExpression ne ? 
ne.name() : limitToString(ex.sourceText()).replace(' ', '_'); + } + + static int TO_STRING_LIMIT = 16; + + static String limitToString(String string) { + return string.length() > TO_STRING_LIMIT ? string.substring(0, TO_STRING_LIMIT - 1) + ">" : string; + } + public LogicalPlan optimize(LogicalPlan verified) { var optimized = execute(verified); @@ -189,35 +225,26 @@ public static LogicalPlan skipPlan(UnaryPlan plan, LocalSupplier supplier) { /** * Pushes LogicalPlans which generate new attributes (Eval, Grok/Dissect, Enrich), past OrderBys and Projections. - * Although it seems arbitrary whether the OrderBy or the Eval is executed first, this transformation ensures that OrderBys only - * separated by an eval can be combined by PushDownAndCombineOrderBy. - * - * E.g.: - * - * ... | sort a | eval x = b + 1 | sort x - * - * becomes - * - * ... | eval x = b + 1 | sort a | sort x - * - * Ordering the Evals before the OrderBys has the advantage that it's always possible to order the plans like this. + * Although it seems arbitrary whether the OrderBy or the generating plan is executed first, this transformation ensures that OrderBys + * only separated by e.g. an Eval can be combined by {@link PushDownAndCombineOrderBy}. + *

+ * E.g. {@code ... | sort a | eval x = b + 1 | sort x} becomes {@code ... | eval x = b + 1 | sort a | sort x} + *

+ * Ordering the generating plans before the OrderBys has the advantage that it's always possible to order the plans like this. * E.g., in the example above it would not be possible to put the eval after the two orderBys. - * - * In case one of the Eval's fields would shadow the orderBy's attributes, we rename the attribute first. - * - * E.g. - * - * ... | sort a | eval a = b + 1 | ... - * - * becomes - * - * ... | eval $$a = a | eval a = b + 1 | sort $$a | drop $$a + *

+ * In case one of the generating plan's attributes would shadow the OrderBy's attributes, we alias the generated attribute first. + *

+ * E.g. {@code ... | sort a | eval a = b + 1 | ...} becomes {@code ... | eval $$a = a | eval a = b + 1 | sort $$a | drop $$a ...} + *

+ * In case the generating plan's attributes would shadow the Project's attributes, we rename the generated attributes in place. + *

+ * E.g. {@code ... | rename a as z | eval a = b + 1 | ...} becomes {@code ... eval $$a = b + 1 | rename a as z, $$a as a ...} */ - public static LogicalPlan pushGeneratingPlanPastProjectAndOrderBy(UnaryPlan generatingPlan, List generatedAttributes) { + public static > LogicalPlan pushGeneratingPlanPastProjectAndOrderBy(Plan generatingPlan) { LogicalPlan child = generatingPlan.child(); - if (child instanceof OrderBy orderBy) { - Set evalFieldNames = new LinkedHashSet<>(Expressions.names(generatedAttributes)); + Set evalFieldNames = new LinkedHashSet<>(Expressions.names(generatingPlan.generatedAttributes())); // Look for attributes in the OrderBy's expressions and create aliases with temporary names for them. AttributeReplacement nonShadowedOrders = renameAttributesInExpressions(evalFieldNames, orderBy.order()); @@ -238,9 +265,66 @@ public static LogicalPlan pushGeneratingPlanPastProjectAndOrderBy(UnaryPlan gene } return orderBy.replaceChild(generatingPlan.replaceChild(orderBy.child())); - } else if (child instanceof Project) { - var projectWithEvalChild = pushDownPastProject(generatingPlan); - return projectWithEvalChild.withProjections(mergeOutputExpressions(generatedAttributes, projectWithEvalChild.projections())); + } else if (child instanceof Project project) { + // We need to account for attribute shadowing: a rename might rely on a name generated in an Eval/Grok/Dissect/Enrich. + // E.g. in: + // + // Eval[[2 * x{f}#1 AS y]] + // \_Project[[x{f}#1, y{f}#2, y{f}#2 AS z]] + // + // Just moving the Eval down breaks z because we shadow y{f}#2. + // Instead, we use a different alias in the Eval, eventually renaming back to y: + // + // Project[[x{f}#1, y{f}#2 as z, $$y{r}#3 as y]] + // \_Eval[[2 * x{f}#1 as $$y]] + + List generatedAttributes = generatingPlan.generatedAttributes(); + + @SuppressWarnings("unchecked") + Plan generatingPlanWithResolvedExpressions = (Plan) resolveRenamesFromProject(generatingPlan, project); + + Set namesReferencedInRenames = new HashSet<>(); + for (NamedExpression ne : project.projections()) { + if (ne instanceof Alias as) { + namesReferencedInRenames.addAll(as.child().references().names()); + } + } + Map renameGeneratedAttributeTo = newNamesForConflictingAttributes( + generatingPlan.generatedAttributes(), + namesReferencedInRenames + ); + List newNames = generatedAttributes.stream() + .map(attr -> renameGeneratedAttributeTo.getOrDefault(attr.name(), attr.name())) + .toList(); + Plan generatingPlanWithRenamedAttributes = generatingPlanWithResolvedExpressions.withGeneratedNames(newNames); + + // Put the project at the top, but include the generated attributes. + // Any generated attributes that had to be renamed need to be re-renamed to their original names. 
+ List generatedAttributesRenamedToOriginal = new ArrayList<>(generatedAttributes.size()); + List renamedGeneratedAttributes = generatingPlanWithRenamedAttributes.generatedAttributes(); + for (int i = 0; i < generatedAttributes.size(); i++) { + Attribute originalAttribute = generatedAttributes.get(i); + Attribute renamedAttribute = renamedGeneratedAttributes.get(i); + if (originalAttribute.name().equals(renamedAttribute.name())) { + generatedAttributesRenamedToOriginal.add(renamedAttribute); + } else { + generatedAttributesRenamedToOriginal.add( + new Alias( + originalAttribute.source(), + originalAttribute.name(), + originalAttribute.qualifier(), + renamedAttribute, + originalAttribute.id(), + originalAttribute.synthetic() + ) + ); + } + } + + Project projectWithGeneratingChild = project.replaceChild(generatingPlanWithRenamedAttributes.replaceChild(project.child())); + return projectWithGeneratingChild.withProjections( + mergeOutputExpressions(generatedAttributesRenamedToOriginal, projectWithGeneratingChild.projections()) + ); } return generatingPlan; @@ -264,8 +348,9 @@ private static AttributeReplacement renameAttributesInExpressions( rewrittenExpressions.add(expr.transformUp(Attribute.class, attr -> { if (attributeNamesToRename.contains(attr.name())) { Alias renamedAttribute = aliasesForReplacedAttributes.computeIfAbsent(attr, a -> { - String tempName = SubstituteSurrogates.rawTemporaryName(a.name(), "temp_name", a.id().toString()); + String tempName = locallyUniqueTemporaryName(a.name(), "temp_name"); // TODO: this should be synthetic + // blocked on https://github.com/elastic/elasticsearch/issues/98703 return new Alias(a.source(), tempName, null, a, null, false); }); return renamedAttribute.toAttribute(); @@ -278,16 +363,28 @@ private static AttributeReplacement renameAttributesInExpressions( return new AttributeReplacement(rewrittenExpressions, aliasesForReplacedAttributes); } + private static Map newNamesForConflictingAttributes( + List potentiallyConflictingAttributes, + Set reservedNames + ) { + if (reservedNames.isEmpty()) { + return Map.of(); + } + + Map renameAttributeTo = new HashMap<>(); + for (Attribute attr : potentiallyConflictingAttributes) { + String name = attr.name(); + if (reservedNames.contains(name)) { + renameAttributeTo.putIfAbsent(name, locallyUniqueTemporaryName(name, "temp_name")); + } + } + + return renameAttributeTo; + } + public static Project pushDownPastProject(UnaryPlan parent) { if (parent.child() instanceof Project project) { - AttributeMap.Builder aliasBuilder = AttributeMap.builder(); - project.forEachExpression(Alias.class, a -> aliasBuilder.put(a.toAttribute(), a.child())); - var aliases = aliasBuilder.build(); - - var expressionsWithResolvedAliases = (UnaryPlan) parent.transformExpressionsOnly( - ReferenceAttribute.class, - r -> aliases.resolve(r, r) - ); + UnaryPlan expressionsWithResolvedAliases = resolveRenamesFromProject(parent, project); return project.replaceChild(expressionsWithResolvedAliases.replaceChild(project.child())); } else { @@ -295,6 +392,14 @@ public static Project pushDownPastProject(UnaryPlan parent) { } } + private static UnaryPlan resolveRenamesFromProject(UnaryPlan plan, Project project) { + AttributeMap.Builder aliasBuilder = AttributeMap.builder(); + project.forEachExpression(Alias.class, a -> aliasBuilder.put(a.toAttribute(), a.child())); + var aliases = aliasBuilder.build(); + + return (UnaryPlan) plan.transformExpressionsOnly(ReferenceAttribute.class, r -> aliases.resolve(r, r)); + } + public abstract static class 
ParameterizedOptimizerRule extends ParameterizedRule< SubPlan, LogicalPlan, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index c02b9948def3f..fe1a66737b17b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -14,12 +14,11 @@ import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.plan.QueryPlan; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.GeneratingPlan; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; -import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.Row; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; @@ -103,18 +102,12 @@ protected AttributeSet generates(LogicalPlan logicalPlan) { || logicalPlan instanceof Aggregate) { return logicalPlan.outputSet(); } - if (logicalPlan instanceof Eval eval) { - return new AttributeSet(Expressions.asAttributes(eval.fields())); - } - if (logicalPlan instanceof RegexExtract extract) { - return new AttributeSet(extract.extractedFields()); + if (logicalPlan instanceof GeneratingPlan generating) { + return new AttributeSet(generating.generatedAttributes()); } if (logicalPlan instanceof MvExpand mvExpand) { return new AttributeSet(mvExpand.expanded()); } - if (logicalPlan instanceof Enrich enrich) { - return new AttributeSet(Expressions.asAttributes(enrich.enrichFields())); - } return AttributeSet.EMPTY; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java index f6a0154108f2d..7e102a36828a9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java @@ -12,11 +12,9 @@ import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import static org.elasticsearch.xpack.esql.core.expression.Expressions.asAttributes; - public final class PushDownEnrich extends OptimizerRules.OptimizerRule { @Override protected LogicalPlan rule(Enrich en) { - return LogicalPlanOptimizer.pushGeneratingPlanPastProjectAndOrderBy(en, asAttributes(en.enrichFields())); + return LogicalPlanOptimizer.pushGeneratingPlanPastProjectAndOrderBy(en); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java index b936e5569c950..e9b42be8dd397 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java @@ -12,11 +12,9 @@ import 
org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Eval; -import static org.elasticsearch.xpack.esql.core.expression.Expressions.asAttributes; - public final class PushDownEval extends OptimizerRules.OptimizerRule { @Override protected LogicalPlan rule(Eval eval) { - return LogicalPlanOptimizer.pushGeneratingPlanPastProjectAndOrderBy(eval, asAttributes(eval.fields())); + return LogicalPlanOptimizer.pushGeneratingPlanPastProjectAndOrderBy(eval); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java index f247d0a631b29..43e13a582276b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java @@ -15,6 +15,6 @@ public final class PushDownRegexExtract extends OptimizerRules.OptimizerRule { @Override protected LogicalPlan rule(RegexExtract re) { - return LogicalPlanOptimizer.pushGeneratingPlanPastProjectAndOrderBy(re, re.extractedFields()); + return LogicalPlanOptimizer.pushGeneratingPlanPastProjectAndOrderBy(re); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java index 476da7476f7fb..e9900d96710a6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java @@ -19,7 +19,7 @@ import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.xpack.esql.optimizer.rules.SubstituteSurrogates.rawTemporaryName; +import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.rawTemporaryName; public final class ReplaceOrderByExpressionWithEval extends OptimizerRules.OptimizerRule { private static int counter = 0; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java index 012d6e307df6c..dbe518770c78d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.esql.core.util.CollectionUtils; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Project; @@ -150,6 +151,6 @@ protected LogicalPlan rule(Aggregate aggregate) { } static String syntheticName(Expression expression, Expression af, int counter) { - return SubstituteSurrogates.temporaryName(expression, af, counter); + return LogicalPlanOptimizer.temporaryName(expression, af, counter); } } diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java index 99b0c8047f2ba..099e76010488e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; +import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; @@ -141,6 +142,6 @@ protected LogicalPlan rule(Aggregate aggregate) { } static String syntheticName(Expression expression, AggregateFunction af, int counter) { - return SubstituteSurrogates.temporaryName(expression, af, counter); + return LogicalPlanOptimizer.temporaryName(expression, af, counter); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java index b734a72ef5e22..b119d01715dce 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java @@ -14,13 +14,13 @@ import org.elasticsearch.xpack.esql.core.expression.EmptyAttribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; +import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Project; @@ -81,7 +81,7 @@ protected LogicalPlan rule(Aggregate aggregate) { var attr = aggFuncToAttr.get(af); // the agg doesn't exist in the Aggregate, create an alias for it and save its attribute if (attr == null) { - var temporaryName = temporaryName(af, agg, counter[0]++); + var temporaryName = LogicalPlanOptimizer.temporaryName(af, agg, counter[0]++); // create a synthetic alias (so it doesn't clash with a user defined name) var newAlias = new Alias(agg.source(), temporaryName, null, af, null, true); attr = newAlias.toAttribute(); @@ -134,28 +134,4 @@ protected LogicalPlan rule(Aggregate aggregate) { return plan; } - - public static String temporaryName(Expression inner, Expression outer, int suffix) { - String in = toString(inner); - String out = toString(outer); - return rawTemporaryName(in, out, String.valueOf(suffix)); - } - - public 
static String rawTemporaryName(String inner, String outer, String suffix) { - return FieldAttribute.SYNTHETIC_ATTRIBUTE_NAME_PREFIX + inner + "$" + outer + "$" + suffix; - } - - static int TO_STRING_LIMIT = 16; - - static String toString(Expression ex) { - return ex instanceof AggregateFunction af ? af.functionName() : extractString(ex); - } - - static String extractString(Expression ex) { - return ex instanceof NamedExpression ne ? ne.name() : limitToString(ex.sourceText()).replace(' ', '_'); - } - - static String limitToString(String string) { - return string.length() > 16 ? string.substring(0, TO_STRING_LIMIT - 1) + ">" : string; - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index 526cf7f17440d..85103109abed1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.Order; -import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; import org.elasticsearch.xpack.esql.core.parser.ParserUtils; @@ -194,21 +193,20 @@ public PlanFactory visitDissectCommand(EsqlBaseParser.DissectCommandContext ctx) try { DissectParser parser = new DissectParser(pattern, appendSeparator); + Set referenceKeys = parser.referenceKeys(); - if (referenceKeys.size() > 0) { + if (referenceKeys.isEmpty() == false) { throw new ParsingException( src, "Reference keys not supported in dissect patterns: [%{*{}}]", referenceKeys.iterator().next() ); } - List keys = new ArrayList<>(); - for (var x : parser.outputKeys()) { - if (x.isEmpty() == false) { - keys.add(new ReferenceAttribute(src, x, DataType.KEYWORD)); - } - } - return new Dissect(src, p, expression(ctx.primaryExpression()), new Dissect.Parser(pattern, appendSeparator, parser), keys); + + Dissect.Parser esqlDissectParser = new Dissect.Parser(pattern, appendSeparator, parser); + List keys = esqlDissectParser.keyAttributes(src); + + return new Dissect(src, p, expression(ctx.primaryExpression()), esqlDissectParser, keys); } catch (DissectException e) { throw new ParsingException(src, "Invalid pattern for dissect: [{}]", pattern); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/GeneratingPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/GeneratingPlan.java new file mode 100644 index 0000000000000..0253ac8dafd84 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/GeneratingPlan.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.plan; + +import org.elasticsearch.xpack.esql.core.expression.Attribute; + +import java.util.List; + +/** + * A plan that creates new {@link Attribute}s and appends them to the child + * {@link org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan}'s attributes. + * Attributes are appended on the right hand side of the child's input. In case of name conflicts, the rightmost attribute with + * a given name shadows any attributes left of it + * (c.f. {@link org.elasticsearch.xpack.esql.expression.NamedExpressions#mergeOutputAttributes(List, List)}). + */ +public interface GeneratingPlan> { + List generatedAttributes(); + + /** + * Create a new instance of this node with new output {@link Attribute}s using the given names. + * If an output attribute already has the desired name, we continue using it; otherwise, we + * create a new attribute with a new {@link org.elasticsearch.xpack.esql.core.expression.NameId}. + */ + // TODO: the generated attributes should probably become synthetic once renamed + // blocked on https://github.com/elastic/elasticsearch/issues/98703 + PlanType withGeneratedNames(List newNames); + + default void checkNumberOfNewNames(List newNames) { + if (newNames.size() != generatedAttributes().size()) { + throw new IllegalArgumentException( + "Number of new names is [" + newNames.size() + "] but there are [" + generatedAttributes().size() + "] existing names." + ); + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java index 1307d1870bba4..58167381ea9e8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java @@ -10,11 +10,14 @@ import org.elasticsearch.dissect.DissectParser; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -23,6 +26,17 @@ public class Dissect extends RegexExtract { public record Parser(String pattern, String appendSeparator, DissectParser parser) { + public List keyAttributes(Source src) { + List keys = new ArrayList<>(); + for (var x : parser.outputKeys()) { + if (x.isEmpty() == false) { + keys.add(new ReferenceAttribute(src, x, DataType.KEYWORD)); + } + } + + return keys; + } + // Override hashCode and equals since the parser is considered equal if its pattern and // appendSeparator are equal ( and DissectParser uses reference equality ) @Override @@ -54,6 +68,11 @@ protected NodeInfo info() { return NodeInfo.create(this, Dissect::new, child(), input, parser, extractedFields); } + @Override + public Dissect withGeneratedNames(List newNames) { + return new Dissect(source(), child(), input, parser, renameExtractedFields(newNames)); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java index f418ab5da1c9d..5a3b5b5d1875f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java @@ -10,27 +10,34 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; +import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.EmptyAttribute; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.plan.GeneratingPlan; +import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; +import static org.elasticsearch.xpack.esql.core.expression.Expressions.asAttributes; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputAttributes; -public class Enrich extends UnaryPlan { +public class Enrich extends UnaryPlan implements GeneratingPlan { private final Expression policyName; private final NamedExpression matchField; private final EnrichPolicy policy; private final Map concreteIndices; // cluster -> enrich indices + // This could be simplified by just always using an Alias. 
private final List enrichFields; private List output; @@ -128,6 +135,32 @@ public List output() { return output; } + @Override + public List generatedAttributes() { + return asAttributes(enrichFields); + } + + @Override + public Enrich withGeneratedNames(List newNames) { + checkNumberOfNewNames(newNames); + + List newEnrichFields = new ArrayList<>(enrichFields.size()); + for (int i = 0; i < enrichFields.size(); i++) { + NamedExpression enrichField = enrichFields.get(i); + String newName = newNames.get(i); + if (enrichField.name().equals(newName)) { + newEnrichFields.add(enrichField); + } else if (enrichField instanceof ReferenceAttribute ra) { + newEnrichFields.add(new Alias(ra.source(), newName, ra.qualifier(), ra, new NameId(), ra.synthetic())); + } else if (enrichField instanceof Alias a) { + newEnrichFields.add(new Alias(a.source(), newName, a.qualifier(), a.child(), new NameId(), a.synthetic())); + } else { + throw new IllegalArgumentException("Enrich field must be Alias or ReferenceAttribute"); + } + } + return new Enrich(source(), child(), mode(), policyName(), matchField(), policy(), concreteIndices(), newEnrichFields); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java index bfe11c3d33d87..108122d4b163c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java @@ -10,17 +10,23 @@ import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeMap; +import org.elasticsearch.xpack.esql.core.expression.NameId; +import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.plan.GeneratingPlan; +import java.util.ArrayList; import java.util.List; import java.util.Objects; +import static org.elasticsearch.xpack.esql.core.expression.Expressions.asAttributes; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputAttributes; -public class Eval extends UnaryPlan { +public class Eval extends UnaryPlan implements GeneratingPlan { private final List fields; private List lazyOutput; @@ -43,6 +49,50 @@ public List output() { return lazyOutput; } + @Override + public List generatedAttributes() { + return asAttributes(fields); + } + + @Override + public Eval withGeneratedNames(List newNames) { + checkNumberOfNewNames(newNames); + + return new Eval(source(), child(), renameAliases(fields, newNames)); + } + + private List renameAliases(List originalAttributes, List newNames) { + AttributeMap.Builder aliasReplacedByBuilder = AttributeMap.builder(); + List newFields = new ArrayList<>(originalAttributes.size()); + for (int i = 0; i < originalAttributes.size(); i++) { + Alias field = originalAttributes.get(i); + String newName = newNames.get(i); + if (field.name().equals(newName)) { + newFields.add(field); + } else { + Alias newField = new Alias(field.source(), newName, 
field.qualifier(), field.child(), new NameId(), field.synthetic()); + newFields.add(newField); + aliasReplacedByBuilder.put(field.toAttribute(), newField.toAttribute()); + } + } + AttributeMap aliasReplacedBy = aliasReplacedByBuilder.build(); + + // We need to also update any references to the old attributes in the new attributes; e.g. + // EVAL x = 1, y = x + 1 + // renaming x, y to x1, y1 + // so far became + // EVAL x1 = 1, y1 = x + 1 + // - but x doesn't exist anymore, so replace it by x1 to obtain + // EVAL x1 = 1, y1 = x1 + 1 + + List newFieldsWithUpdatedRefs = new ArrayList<>(originalAttributes.size()); + for (Alias newField : newFields) { + newFieldsWithUpdatedRefs.add((Alias) newField.transformUp(ReferenceAttribute.class, r -> aliasReplacedBy.resolve(r, r))); + } + + return newFieldsWithUpdatedRefs; + } + @Override public boolean expressionsResolved() { return Resolvables.resolved(fields); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java index e495a2eb76668..3bd870e326157 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java @@ -105,6 +105,11 @@ public List output() { return NamedExpressions.mergeOutputAttributes(extractedFields, child().output()); } + @Override + public Grok withGeneratedNames(List newNames) { + return new Grok(source(), child(), input, parser, renameExtractedFields(newNames)); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java index 5bf45fc0f61ad..7c1d457c18e55 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java @@ -9,16 +9,19 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.plan.GeneratingPlan; +import java.util.ArrayList; import java.util.List; import java.util.Objects; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputAttributes; -public abstract class RegexExtract extends UnaryPlan { +public abstract class RegexExtract extends UnaryPlan implements GeneratingPlan { protected final Expression input; protected final List extractedFields; @@ -42,10 +45,36 @@ public Expression input() { return input; } + /** + * Upon parsing, these are named according to the {@link Dissect} or {@link Grok} pattern, but can be renamed without changing the + * pattern. 
+ */ public List extractedFields() { return extractedFields; } + @Override + public List generatedAttributes() { + return extractedFields; + } + + List renameExtractedFields(List newNames) { + checkNumberOfNewNames(newNames); + + List renamedExtractedFields = new ArrayList<>(extractedFields.size()); + for (int i = 0; i < newNames.size(); i++) { + Attribute extractedField = extractedFields.get(i); + String newName = newNames.get(i); + if (extractedField.name().equals(newName)) { + renamedExtractedFields.add(extractedField); + } else { + renamedExtractedFields.add(extractedFields.get(i).withName(newNames.get(i)).withId(new NameId())); + } + } + + return renamedExtractedFields; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index ddf5fa6eaf8a3..28855abfff73c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -58,6 +58,8 @@ import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.Order; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; @@ -417,12 +419,14 @@ private PhysicalOperation planDissect(DissectExec dissect, LocalExecutionPlanner Layout.Builder layoutBuilder = source.layout.builder(); layoutBuilder.append(dissect.extractedFields()); final Expression expr = dissect.inputExpression(); - String[] attributeNames = Expressions.names(dissect.extractedFields()).toArray(new String[0]); + // Names in the pattern and layout can differ. + // Attributes need to be rename-able to avoid problems with shadowing - see GeneratingPlan resp. PushDownRegexExtract. + String[] patternNames = Expressions.names(dissect.parser().keyAttributes(Source.EMPTY)).toArray(new String[0]); Layout layout = layoutBuilder.build(); source = source.with( new StringExtractOperator.StringExtractOperatorFactory( - attributeNames, + patternNames, EvalMapper.toEvaluator(expr, layout), () -> (input) -> dissect.parser().parser().parse(input) ), @@ -439,11 +443,15 @@ private PhysicalOperation planGrok(GrokExec grok, LocalExecutionPlannerContext c Map fieldToPos = new HashMap<>(extractedFields.size()); Map fieldToType = new HashMap<>(extractedFields.size()); ElementType[] types = new ElementType[extractedFields.size()]; + List extractedFieldsFromPattern = grok.pattern().extractedFields(); for (int i = 0; i < extractedFields.size(); i++) { - Attribute extractedField = extractedFields.get(i); - ElementType type = PlannerUtils.toElementType(extractedField.dataType()); - fieldToPos.put(extractedField.name(), i); - fieldToType.put(extractedField.name(), type); + DataType extractedFieldType = extractedFields.get(i).dataType(); + // Names in pattern and layout can differ. + // Attributes need to be rename-able to avoid problems with shadowing - see GeneratingPlan resp. PushDownRegexExtract. 
+ String patternName = extractedFieldsFromPattern.get(i).name(); + ElementType type = PlannerUtils.toElementType(extractedFieldType); + fieldToPos.put(patternName, i); + fieldToType.put(patternName, type); types[i] = type; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index e7a999b892f44..669de17891583 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.compute.aggregation.QuantileStates; import org.elasticsearch.core.Tuple; +import org.elasticsearch.dissect.DissectParser; import org.elasticsearch.index.IndexMode; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.EsqlTestUtils; @@ -21,6 +22,7 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; +import org.elasticsearch.xpack.esql.core.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; @@ -42,6 +44,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardPattern; import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.index.IndexResolution; +import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; import org.elasticsearch.xpack.esql.core.plan.logical.Filter; import org.elasticsearch.xpack.esql.core.plan.logical.Limit; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; @@ -71,6 +74,7 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.Values; import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateExtract; @@ -114,9 +118,13 @@ import org.elasticsearch.xpack.esql.optimizer.rules.LiteralsOnTheRight; import org.elasticsearch.xpack.esql.optimizer.rules.PushDownAndCombineFilters; import org.elasticsearch.xpack.esql.optimizer.rules.PushDownAndCombineLimits; +import org.elasticsearch.xpack.esql.optimizer.rules.PushDownEnrich; +import org.elasticsearch.xpack.esql.optimizer.rules.PushDownEval; +import org.elasticsearch.xpack.esql.optimizer.rules.PushDownRegexExtract; import org.elasticsearch.xpack.esql.optimizer.rules.SplitInWithFoldableValue; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.parser.ParsingException; +import org.elasticsearch.xpack.esql.plan.GeneratingPlan; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -140,6 +148,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import 
java.util.function.BiFunction; import java.util.function.Function; import static java.util.Arrays.asList; @@ -157,6 +166,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.getFieldAttribute; import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; import static org.elasticsearch.xpack.esql.EsqlTestUtils.localSource; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.referenceAttribute; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.analysis.Analyzer.NO_FIELDS; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; @@ -188,6 +198,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; @@ -1021,7 +1032,7 @@ public void testPushDownDissectPastProject() { var keep = as(plan, Project.class); var dissect = as(keep.child(), Dissect.class); - assertThat(dissect.extractedFields(), contains(new ReferenceAttribute(Source.EMPTY, "y", DataType.KEYWORD))); + assertThat(dissect.extractedFields(), contains(referenceAttribute("y", DataType.KEYWORD))); } public void testPushDownGrokPastProject() { @@ -1034,7 +1045,7 @@ public void testPushDownGrokPastProject() { var keep = as(plan, Project.class); var grok = as(keep.child(), Grok.class); - assertThat(grok.extractedFields(), contains(new ReferenceAttribute(Source.EMPTY, "y", DataType.KEYWORD))); + assertThat(grok.extractedFields(), contains(referenceAttribute("y", DataType.KEYWORD))); } public void testPushDownFilterPastProjectUsingEval() { @@ -4254,6 +4265,210 @@ public void testPushdownWithOverwrittenName() { } } + record PushdownShadowingGeneratingPlanTestCase( + BiFunction applyLogicalPlan, + OptimizerRules.OptimizerRule rule + ) {}; + + static PushdownShadowingGeneratingPlanTestCase[] PUSHDOWN_SHADOWING_GENERATING_PLAN_TEST_CASES = { + // | EVAL y = to_integer(x), y = y + 1 + new PushdownShadowingGeneratingPlanTestCase((plan, attr) -> { + Alias y1 = new Alias(EMPTY, "y", new ToInteger(EMPTY, attr)); + Alias y2 = new Alias(EMPTY, "y", new Add(EMPTY, y1.toAttribute(), new Literal(EMPTY, 1, INTEGER))); + return new Eval(EMPTY, plan, List.of(y1, y2)); + }, new PushDownEval()), + // | DISSECT x "%{y} %{y}" + new PushdownShadowingGeneratingPlanTestCase( + (plan, attr) -> new Dissect( + EMPTY, + plan, + attr, + new Dissect.Parser("%{y} %{y}", ",", new DissectParser("%{y} %{y}", ",")), + List.of(new ReferenceAttribute(EMPTY, "y", KEYWORD), new ReferenceAttribute(EMPTY, "y", KEYWORD)) + ), + new PushDownRegexExtract() + ), + // | GROK x "%{WORD:y} %{WORD:y}" + new PushdownShadowingGeneratingPlanTestCase( + (plan, attr) -> new Grok(EMPTY, plan, attr, Grok.pattern(EMPTY, "%{WORD:y} %{WORD:y}")), + new PushDownRegexExtract() + ), + // | ENRICH some_policy ON x WITH y = some_enrich_idx_field, y = some_other_enrich_idx_field + new PushdownShadowingGeneratingPlanTestCase( + (plan, attr) -> new Enrich( + EMPTY, + plan, + Enrich.Mode.ANY, + new Literal(EMPTY, "some_policy", KEYWORD), + attr, + null, + Map.of(), + List.of( + new Alias(EMPTY, "y", new ReferenceAttribute(EMPTY, "some_enrich_idx_field", KEYWORD)), + new Alias(EMPTY, "y", new ReferenceAttribute(EMPTY, "some_other_enrich_idx_field", KEYWORD)) + ) + ), + new PushDownEnrich() + ) }; + + /** + * Consider + * + * 
Eval[[TO_INTEGER(x{r}#2) AS y, y{r}#4 + 1[INTEGER] AS y]] + * \_Project[[y{r}#3, x{r}#2]] + * \_Row[[1[INTEGER] AS x, 2[INTEGER] AS y]] + * + * We can freely push down the Eval without renaming, but need to update the Project's references. + * + * Project[[x{r}#2, y{r}#6 AS y]] + * \_Eval[[TO_INTEGER(x{r}#2) AS y, y{r}#4 + 1[INTEGER] AS y]] + * \_Row[[1[INTEGER] AS x, 2[INTEGER] AS y]] + * + * And similarly for dissect, grok and enrich. + */ + public void testPushShadowingGeneratingPlanPastProject() { + Alias x = new Alias(EMPTY, "x", new Literal(EMPTY, "1", KEYWORD)); + Alias y = new Alias(EMPTY, "y", new Literal(EMPTY, "2", KEYWORD)); + LogicalPlan initialRow = new Row(EMPTY, List.of(x, y)); + LogicalPlan initialProject = new Project(EMPTY, initialRow, List.of(y.toAttribute(), x.toAttribute())); + + for (PushdownShadowingGeneratingPlanTestCase testCase : PUSHDOWN_SHADOWING_GENERATING_PLAN_TEST_CASES) { + LogicalPlan initialPlan = testCase.applyLogicalPlan.apply(initialProject, x.toAttribute()); + @SuppressWarnings("unchecked") + List initialGeneratedExprs = ((GeneratingPlan) initialPlan).generatedAttributes(); + LogicalPlan optimizedPlan = testCase.rule.apply(initialPlan); + + Failures inconsistencies = LogicalVerifier.INSTANCE.verify(optimizedPlan); + assertFalse(inconsistencies.hasFailures()); + + Project project = as(optimizedPlan, Project.class); + LogicalPlan pushedDownGeneratingPlan = project.child(); + + List projections = project.projections(); + @SuppressWarnings("unchecked") + List newGeneratedExprs = ((GeneratingPlan) pushedDownGeneratingPlan).generatedAttributes(); + assertEquals(newGeneratedExprs, initialGeneratedExprs); + // The rightmost generated attribute makes it into the final output as "y". + Attribute rightmostGenerated = newGeneratedExprs.get(newGeneratedExprs.size() - 1); + + assertThat(Expressions.names(projections), contains("x", "y")); + assertThat(projections, everyItem(instanceOf(ReferenceAttribute.class))); + ReferenceAttribute yShadowed = as(projections.get(1), ReferenceAttribute.class); + assertTrue(yShadowed.semanticEquals(rightmostGenerated)); + } + } + + /** + * Consider + * + * Eval[[TO_INTEGER(x{r}#2) AS y, y{r}#4 + 1[INTEGER] AS y]] + * \_Project[[x{r}#2, y{r}#3, y{r}#3 AS z]] + * \_Row[[1[INTEGER] AS x, 2[INTEGER] AS y]] + * + * To push down the Eval, we must not shadow the reference y{r}#3, so we rename. + * + * Project[[x{r}#2, y{r}#3 AS z, $$y$temp_name$10{r}#12 AS y]] + * Eval[[TO_INTEGER(x{r}#2) AS $$y$temp_name$10, $$y$temp_name$10{r}#11 + 1[INTEGER] AS $$y$temp_name$10]] + * \_Row[[1[INTEGER] AS x, 2[INTEGER] AS y]] + * + * And similarly for dissect, grok and enrich. 
+ */ + public void testPushShadowingGeneratingPlanPastRenamingProject() { + Alias x = new Alias(EMPTY, "x", new Literal(EMPTY, "1", KEYWORD)); + Alias y = new Alias(EMPTY, "y", new Literal(EMPTY, "2", KEYWORD)); + LogicalPlan initialRow = new Row(EMPTY, List.of(x, y)); + LogicalPlan initialProject = new Project( + EMPTY, + initialRow, + List.of(x.toAttribute(), y.toAttribute(), new Alias(EMPTY, "z", y.toAttribute())) + ); + + for (PushdownShadowingGeneratingPlanTestCase testCase : PUSHDOWN_SHADOWING_GENERATING_PLAN_TEST_CASES) { + LogicalPlan initialPlan = testCase.applyLogicalPlan.apply(initialProject, x.toAttribute()); + @SuppressWarnings("unchecked") + List initialGeneratedExprs = ((GeneratingPlan) initialPlan).generatedAttributes(); + LogicalPlan optimizedPlan = testCase.rule.apply(initialPlan); + + Failures inconsistencies = LogicalVerifier.INSTANCE.verify(optimizedPlan); + assertFalse(inconsistencies.hasFailures()); + + Project project = as(optimizedPlan, Project.class); + LogicalPlan pushedDownGeneratingPlan = project.child(); + + List projections = project.projections(); + @SuppressWarnings("unchecked") + List newGeneratedExprs = ((GeneratingPlan) pushedDownGeneratingPlan).generatedAttributes(); + List newNames = Expressions.names(newGeneratedExprs); + assertThat(newNames.size(), equalTo(initialGeneratedExprs.size())); + assertThat(newNames, everyItem(startsWith("$$y$temp_name$"))); + // The rightmost generated attribute makes it into the final output as "y". + Attribute rightmostGeneratedWithNewName = newGeneratedExprs.get(newGeneratedExprs.size() - 1); + + assertThat(Expressions.names(projections), contains("x", "z", "y")); + assertThat(projections.get(0), instanceOf(ReferenceAttribute.class)); + Alias zAlias = as(projections.get(1), Alias.class); + ReferenceAttribute yRenamed = as(zAlias.child(), ReferenceAttribute.class); + assertEquals(yRenamed.name(), "y"); + Alias yAlias = as(projections.get(2), Alias.class); + ReferenceAttribute yTempRenamed = as(yAlias.child(), ReferenceAttribute.class); + assertTrue(yTempRenamed.semanticEquals(rightmostGeneratedWithNewName)); + } + } + + /** + * Consider + * + * Eval[[TO_INTEGER(x{r}#2) AS y, y{r}#3 + 1[INTEGER] AS y]] + * \_Project[[y{r}#1, y{r}#1 AS x]] + * \_Row[[2[INTEGER] AS y]] + * + * To push down the Eval, we must not shadow the reference y{r}#1, so we rename. + * Additionally, the rename "y AS x" needs to be propagated into the Eval. + * + * Project[[y{r}#1 AS x, $$y$temp_name$10{r}#12 AS y]] + * Eval[[TO_INTEGER(y{r}#1) AS $$y$temp_name$10, $$y$temp_name$10{r}#11 + 1[INTEGER] AS $$y$temp_name$10]] + * \_Row[[2[INTEGER] AS y]] + * + * And similarly for dissect, grok and enrich. + */ + public void testPushShadowingGeneratingPlanPastRenamingProjectWithResolution() { + Alias y = new Alias(EMPTY, "y", new Literal(EMPTY, "2", KEYWORD)); + Alias yAliased = new Alias(EMPTY, "x", y.toAttribute()); + LogicalPlan initialRow = new Row(EMPTY, List.of(y)); + LogicalPlan initialProject = new Project(EMPTY, initialRow, List.of(y.toAttribute(), yAliased)); + + for (PushdownShadowingGeneratingPlanTestCase testCase : PUSHDOWN_SHADOWING_GENERATING_PLAN_TEST_CASES) { + LogicalPlan initialPlan = testCase.applyLogicalPlan.apply(initialProject, yAliased.toAttribute()); + @SuppressWarnings("unchecked") + List initialGeneratedExprs = ((GeneratingPlan) initialPlan).generatedAttributes(); + LogicalPlan optimizedPlan = testCase.rule.apply(initialPlan); + + // This ensures that our generating plan doesn't use invalid references, resp. 
that any rename from the Project has + // been propagated into the generating plan. + Failures inconsistencies = LogicalVerifier.INSTANCE.verify(optimizedPlan); + assertFalse(inconsistencies.hasFailures()); + + Project project = as(optimizedPlan, Project.class); + LogicalPlan pushedDownGeneratingPlan = project.child(); + + List projections = project.projections(); + @SuppressWarnings("unchecked") + List newGeneratedExprs = ((GeneratingPlan) pushedDownGeneratingPlan).generatedAttributes(); + List newNames = Expressions.names(newGeneratedExprs); + assertThat(newNames.size(), equalTo(initialGeneratedExprs.size())); + assertThat(newNames, everyItem(startsWith("$$y$temp_name$"))); + // The rightmost generated attribute makes it into the final output as "y". + Attribute rightmostGeneratedWithNewName = newGeneratedExprs.get(newGeneratedExprs.size() - 1); + + assertThat(Expressions.names(projections), contains("x", "y")); + Alias yRenamed = as(projections.get(0), Alias.class); + assertTrue(yRenamed.child().semanticEquals(y.toAttribute())); + Alias yTempRenamed = as(projections.get(1), Alias.class); + ReferenceAttribute yTemp = as(yTempRenamed.child(), ReferenceAttribute.class); + assertTrue(yTemp.semanticEquals(rightmostGeneratedWithNewName)); + } + } + /** * Expects * Project[[min{r}#4, languages{f}#11]] diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/AbstractStatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/AbstractStatementParserTests.java index 545f3efe8ca79..63204b4dd797d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/AbstractStatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/AbstractStatementParserTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -55,10 +54,6 @@ static UnresolvedAttribute attribute(String name) { return new UnresolvedAttribute(EMPTY, name); } - static ReferenceAttribute referenceAttribute(String name, DataType type) { - return new ReferenceAttribute(EMPTY, name, type); - } - static Literal integer(int i) { return new Literal(EMPTY, i, DataType.INTEGER); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 111c90790caf0..a195fb8180bf3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -53,6 +53,7 @@ import java.util.function.Function; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.referenceAttribute; import static org.elasticsearch.xpack.esql.core.expression.Literal.FALSE; import static org.elasticsearch.xpack.esql.core.expression.Literal.TRUE; import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; From 2018c462413f48f66d06a9d778663d98eb2f048c Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Wed, 24 Jul 
2024 13:46:08 +0300 Subject: [PATCH 110/406] backporting (#111230) --- .../org/elasticsearch/test/ESSingleNodeTestCase.java | 4 +--- .../main/java/org/elasticsearch/test/ESTestCase.java | 12 ++++++++++++ .../org/elasticsearch/test/InternalTestCluster.java | 10 +--------- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 7fdc5765a90e8..a538c39704a73 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -133,9 +133,7 @@ public void tearDown() throws Exception { ensureNoInitializingShards(); ensureAllFreeContextActionsAreConsumed(); - SearchService searchService = getInstanceFromNode(SearchService.class); - assertThat(searchService.getActiveContexts(), equalTo(0)); - assertThat(searchService.getOpenScrollContexts(), equalTo(0)); + ensureAllContextsReleased(getInstanceFromNode(SearchService.class)); super.tearDown(); var deleteDataStreamsRequest = new DeleteDataStreamAction.Request("*"); deleteDataStreamsRequest.indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index add0de1993233..81db437483c68 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -112,6 +112,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.MockSearchService; +import org.elasticsearch.search.SearchService; import org.elasticsearch.test.junit.listeners.LoggingListener; import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; import org.elasticsearch.threadpool.ExecutorBuilder; @@ -2423,4 +2424,15 @@ public static T expectThrows(Class expectedType, Reques () -> builder.get().decRef() // dec ref if we unexpectedly fail to not leak transport response ); } + + public static void ensureAllContextsReleased(SearchService searchService) { + try { + assertBusy(() -> { + assertThat(searchService.getActiveContexts(), equalTo(0)); + assertThat(searchService.getOpenScrollContexts(), equalTo(0)); + }); + } catch (Exception e) { + throw new AssertionError("Failed to verify search contexts", e); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index bb78c43fca449..3a5961d4e7ddf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -2569,15 +2569,7 @@ public void assertRequestsFinished() { private void assertSearchContextsReleased() { for (NodeAndClient nodeAndClient : nodes.values()) { - SearchService searchService = getInstance(SearchService.class, nodeAndClient.name); - try { - assertBusy(() -> { - assertThat(searchService.getActiveContexts(), equalTo(0)); - assertThat(searchService.getOpenScrollContexts(), equalTo(0)); - }); - } catch (Exception e) { - throw new AssertionError("Failed to verify search contexts", e); - } + ESTestCase.ensureAllContextsReleased(getInstance(SearchService.class, nodeAndClient.name)); } } From 
d4115943d869c671785cc2c9312cbf15bac7c409 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 24 Jul 2024 15:06:22 +0200 Subject: [PATCH 111/406] Extend timeout of Krb5kDcContainer test container startup (#111198) (#111200) - testcontainer startup timeout defaults to 60s and we occasionally see this fixture taking longer to start up in certain environments fixes #111140 --- .../elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java b/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java index fa75b57ea87a6..14357b6d47bbc 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java +++ b/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java @@ -22,6 +22,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -74,6 +75,7 @@ public Krb5kDcContainer(ProvisioningId provisioningId) { this.provisioningId = provisioningId; withNetwork(Network.newNetwork()); addExposedPorts(88, 4444); + withStartupTimeout(Duration.ofMinutes(2)); withCreateContainerCmdModifier(cmd -> { // Add previously exposed ports and UDP port List exposedPorts = new ArrayList<>(); From 9321452bd628cce07dea20d58b422fa98bf1cf25 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 24 Jul 2024 07:29:09 -0700 Subject: [PATCH 112/406] Revert "Mute org.elasticsearch.packaging.test.DockerTests test600Interrupt #111132" This reverts commit b521b48bf0c808d45a294334f1656ea73bc974e3. --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 25459c3a29e61..dc71ba3647290 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -89,9 +89,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/110343 - class: org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT issue: https://github.com/elastic/elasticsearch/issues/111124 -- class: org.elasticsearch.packaging.test.DockerTests - method: test600Interrupt - issue: https://github.com/elastic/elasticsearch/issues/111132 # Examples: # From 8186f7a835c0e87b891ace5e31efb989cab4a136 Mon Sep 17 00:00:00 2001 From: Valeriy Khakhutskyy <1292899+valeriy42@users.noreply.github.com> Date: Thu, 25 Jul 2024 10:55:47 +0200 Subject: [PATCH 113/406] [ML] Extend lat_long documentation (#111239) (#111266) This PR adds the explanation of what "typical" means for the lat_long function. --- .../anomaly-detection/functions/ml-geo-functions.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc index 5c061daa1cd44..63a0f047db647 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc @@ -52,6 +52,12 @@ detects anomalies where the geographic location of a credit card transaction is unusual for a particular customer’s credit card. An anomaly might indicate fraud. +A "typical" value indicates a centroid of a cluster of previously observed +locations that is closest to the "actual" location at that time.
For example, +there may be one centroid near the person's home that is associated with the +cluster of local grocery stores and restaurants, and another centroid near the +person's work associated with the cluster of lunch and coffee places. + IMPORTANT: The `field_name` that you supply must be a single string that contains two comma-separated numbers of the form `latitude,longitude`, a `geo_point` field, a `geo_shape` field that contains point values, or a From 531f0291ce0054eed94282ff91cfcfad25a64e37 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 25 Jul 2024 12:18:37 +0100 Subject: [PATCH 114/406] Update README.asciidoc (#111244) (#111269) adding text to match the positioning that Shay asked for on https://www.elastic.co/elasticsearch (cherry picked from commit 66218164a0aabe5910e13c7627cc1453378c2bce) Co-authored-by: Serena Chou --- README.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.asciidoc b/README.asciidoc index dc27735d3c015..fa479d9c76340 100644 --- a/README.asciidoc +++ b/README.asciidoc @@ -1,6 +1,6 @@ = Elasticsearch -Elasticsearch is a distributed search and analytics engine optimized for speed and relevance on production-scale workloads. Elasticsearch is the foundation of Elastic's open Stack platform. Search in near real-time over massive datasets, perform vector searches, integrate with generative AI applications, and much more. +Elasticsearch is a distributed search and analytics engine, scalable data store and vector database optimized for speed and relevance on production-scale workloads. Elasticsearch is the foundation of Elastic's open Stack platform. Search in near real-time over massive datasets, perform vector searches, integrate with generative AI applications, and much more. Use cases enabled by Elasticsearch include: From a5e13e092053c4382bb5acbdf8e7209b23c5b605 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Thu, 25 Jul 2024 08:52:59 -0600 Subject: [PATCH 115/406] (Doc+) How to resolve shards >50GB (#111254) (#111289) * (Doc+) How to resolve shards >50GB --------- Co-authored-by: Ievgen Degtiarenko --- .../how-to/size-your-shards.asciidoc | 30 +++++++++++++++++-- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 31f4039bcfaca..6baac25aa0532 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -152,9 +152,10 @@ same data. However, very large shards can also cause slower searches and will take longer to recover after a failure. There is no hard limit on the physical size of a shard, and each shard can in -theory contain up to just over two billion documents. However, experience shows -that shards between 10GB and 50GB typically work well for many use cases, as -long as the per-shard document count is kept below 200 million. +theory contain up to <>. However, experience shows that shards between 10GB and 50GB +typically work well for many use cases, as long as the per-shard document count +is kept below 200 million. 
You may be able to use larger shards depending on your network and use case, and smaller shards may be appropriate for @@ -184,6 +185,29 @@ index prirep shard store // TESTRESPONSE[s/\.ds-my-data-stream-2099\.05\.06-000001/my-index-000001/] // TESTRESPONSE[s/50gb/.*/] +If an index's shard is experiencing degraded performance from surpassing the +recommended 50GB size, you may consider fixing the index's shards' sizing. +Shards are immutable and therefore their size is fixed in place, +so indices must be copied with corrected settings. This requires first ensuring +sufficient disk to copy the data. Afterwards, you can copy the index's data +with corrected settings via one of the following options: + +* running <> to increase number of primary +shards + +* creating a destination index with corrected settings and then running +<> + +Kindly note performing a <> and/or +<> would be insufficient to resolve shards' +sizing. + +Once a source index's data is copied into its destination index, the source +index can be <>. You may then consider setting +<> against the destination index for the source +index's name to point to it for continuity. + + [discrete] [[shard-count-recommendation]] ==== Master-eligible nodes should have at least 1GB of heap per 3000 indices From c808d33744d1ef8164bc09f981953eff5d64a9b5 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Thu, 25 Jul 2024 09:37:16 -0600 Subject: [PATCH 116/406] (Doc+) Link Gateway Settings to Full Restart (#110902) (#111292) * (Doc+) Link Gateway Settings to Full Restart --------- Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com> --- docs/reference/modules/gateway.asciidoc | 6 +++--- docs/reference/setup/restart-cluster.asciidoc | 2 +- docs/reference/upgrade/disable-shard-alloc.asciidoc | 4 ++++ 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/reference/modules/gateway.asciidoc b/docs/reference/modules/gateway.asciidoc index d6ee730d5021c..bf7e6de64f093 100644 --- a/docs/reference/modules/gateway.asciidoc +++ b/docs/reference/modules/gateway.asciidoc @@ -4,11 +4,11 @@ The local gateway stores the cluster state and shard data across full cluster restarts. -The following _static_ settings, which must be set on every master node, +The following _static_ settings, which must be set on every <>, control how long a freshly elected master should wait before it tries to -recover the cluster state and the cluster's data. +recover the <> and the cluster's data. -NOTE: These settings only take effect on a full cluster restart. +NOTE: These settings only take effect during a <>. `gateway.expected_data_nodes`:: (<>) diff --git a/docs/reference/setup/restart-cluster.asciidoc b/docs/reference/setup/restart-cluster.asciidoc index 9488c6632836b..a3bf7723cb5a9 100644 --- a/docs/reference/setup/restart-cluster.asciidoc +++ b/docs/reference/setup/restart-cluster.asciidoc @@ -11,7 +11,7 @@ time, so the service remains uninterrupted. [WARNING] ==== Nodes exceeding the low watermark threshold will be slow to restart. Reduce the disk -usage below the <> before to restarting nodes. +usage below the <> before restarting nodes. 
==== [discrete] diff --git a/docs/reference/upgrade/disable-shard-alloc.asciidoc b/docs/reference/upgrade/disable-shard-alloc.asciidoc index a93b6dfc6c60b..f69a673095257 100644 --- a/docs/reference/upgrade/disable-shard-alloc.asciidoc +++ b/docs/reference/upgrade/disable-shard-alloc.asciidoc @@ -17,3 +17,7 @@ PUT _cluster/settings } -------------------------------------------------- // TEST[skip:indexes don't assign] + +You can also consider <> when restarting +large clusters to reduce initial strain while nodes are processing +<>. \ No newline at end of file From 7d8fe4a6cc4c9fdb7d606b17a6c2e9d9319054d5 Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Thu, 25 Jul 2024 19:26:31 +0200 Subject: [PATCH 117/406] Update get snapshot status API doc (#111240) (#111295) Make it clear that this API should be used only if the detailed shard info is needed and only on ongoing snapshots. Remove incorrectly mentioned `STATE` value. --- .../apis/get-snapshot-status-api.asciidoc | 9 ++------- .../snapshots/status/TransportSnapshotsStatusAction.java | 3 +-- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc index d8b03cbc0e880..e677408da3f25 100644 --- a/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc @@ -4,7 +4,7 @@ Get snapshot status ++++ -Retrieves a detailed description of the current state for each shard participating in the snapshot. +Retrieves a detailed description of the current state for each shard participating in the snapshot. Note that this API should only be used to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed, or you want to obtain information about one or more existing snapshots, use the <>. //// [source,console] @@ -172,13 +172,8 @@ Indicates the current snapshot state. `STARTED`:: The snapshot is currently running. -`PARTIAL`:: - The global cluster state was stored, but data of at least one shard was not stored successfully. - The <> section of the response contains more detailed information about shards - that were not processed correctly. - `SUCCESS`:: - The snapshot finished and all shards were stored successfully. + The snapshot completed. ==== -- diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 28f970eb8c9fe..7cf1d55622b7c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -331,8 +331,7 @@ private void loadRepositoryData( final SnapshotsInProgress.State state = switch (snapshotInfo.state()) { case FAILED -> SnapshotsInProgress.State.FAILED; case SUCCESS, PARTIAL -> - // Translating both PARTIAL and SUCCESS to SUCCESS for now - // TODO: add the differentiation on the metadata level in the next major release + // Both of these means the snapshot has completed. 
SnapshotsInProgress.State.SUCCESS; default -> throw new IllegalArgumentException("Unexpected snapshot state " + snapshotInfo.state()); }; From 658a19eacc2cafae2ef7be1f1cb43ce0a9ec59fd Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 26 Jul 2024 18:10:55 +1000 Subject: [PATCH 118/406] Mute org.elasticsearch.packaging.test.DockerTests test600Interrupt #111324 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index dc71ba3647290..629acf8a767b6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -89,6 +89,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/110343 - class: org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT issue: https://github.com/elastic/elasticsearch/issues/111124 +- class: org.elasticsearch.packaging.test.DockerTests + method: test600Interrupt + issue: https://github.com/elastic/elasticsearch/issues/111324 # Examples: # From c430bc843cd77547702b4e642e876cc1854b14de Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Fri, 26 Jul 2024 10:42:51 +0200 Subject: [PATCH 119/406] ESQL: Mark union types as experimental (#111297) (#111325) --- docs/reference/esql/esql-multi-index.asciidoc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/reference/esql/esql-multi-index.asciidoc b/docs/reference/esql/esql-multi-index.asciidoc index 41ff6a27417b1..cf98cbe959237 100644 --- a/docs/reference/esql/esql-multi-index.asciidoc +++ b/docs/reference/esql/esql-multi-index.asciidoc @@ -103,7 +103,7 @@ FROM events_* [source,bash] ---- -Cannot use field [client_ip] due to ambiguities being mapped as +Cannot use field [client_ip] due to ambiguities being mapped as [2] incompatible types: [ip] in [events_ip], [keyword] in [events_keyword] @@ -113,12 +113,14 @@ Cannot use field [client_ip] due to ambiguities being mapped as [[esql-multi-index-union-types]] === Union types +experimental::[] + {esql} has a way to handle <>. When the same field is mapped to multiple types in multiple indices, the type of the field is understood to be a _union_ of the various types in the index mappings. As seen in the preceding examples, this _union type_ cannot be used in the results, and cannot be referred to by the query -- except when it's passed to a type conversion function that accepts all the types in the _union_ and converts the field -to a single type. {esql} offers a suite of <> to achieve this. +to a single type. {esql} offers a suite of <> to achieve this. In the above examples, the query can use a command like `EVAL client_ip = TO_IP(client_ip)` to resolve the union of `ip` and `keyword` to just `ip`. From ebbf17b1f43c3367ecdd96f3d977e7d8f81eb70c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 26 Jul 2024 12:19:05 +0200 Subject: [PATCH 120/406] [DOCS] Amends PUT inference API docs with model download info (#111278) (#111332) * [DOCS] Amends PUT inference API docs with model download info. * [DOCS] Addresses feedback. 
--- .../reference/inference/put-inference.asciidoc | 18 ++++++++++++++---- .../inference/service-elasticsearch.asciidoc | 10 +++++++++- .../reference/inference/service-elser.asciidoc | 6 ++++++ 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 948496c473a20..f8732a6aff6fd 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -6,10 +6,17 @@ experimental[] Creates an {infer} endpoint to perform an {infer} task. -IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in -{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI or Hugging Face. -For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. -However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. +[IMPORTANT] +==== +* The {infer} APIs enable you to use certain services, such as built-in +{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, +Azure OpenAI, Google AI Studio, Google Vertex AI or Hugging Face. +* For built-in models and models uploaded through Eland, the {infer} APIs offer an +alternative way to use and manage trained models. However, if you do not plan to +use the {infer} APIs to use these models or if you want to use non-NLP models, +use the <>. +==== + [discrete] [[put-inference-api-request]] @@ -43,3 +50,6 @@ The following services are available through the {infer} API, click the links to * <> * <> * <> + +The {es} and ELSER services run on a {ml} node in your {es} cluster. The rest of +the services connect to external providers. \ No newline at end of file diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index 50b97b3506ee8..b568a4691a4bd 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -1,7 +1,12 @@ [[infer-service-elasticsearch]] === Elasticsearch {infer} service -Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` service. +Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` +service. + +NOTE: If you use the E5 model through the `elasticsearch` service, the API +request will automatically download and deploy the model if it isn't downloaded +yet. [discrete] @@ -81,6 +86,9 @@ Returns the document instead of only the index. Defaults to `true`. The following example shows how to create an {infer} endpoint called `my-e5-model` to perform a `text_embedding` task type. +The API request below will automatically download the E5 model if it isn't +already downloaded and then deploy the model. + [source,console] ------------------------------------------------------------ PUT _inference/text_embedding/my-e5-model diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index dff531f2a414b..34c0f7d0a9c53 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -3,6 +3,9 @@ Creates an {infer} endpoint to perform an {infer} task with the `elser` service. 
+NOTE: The API request will automatically download and deploy the ELSER model if +it isn't already downloaded. + [discrete] [[infer-service-elser-api-request]] @@ -63,6 +66,9 @@ The following example shows how to create an {infer} endpoint called `my-elser-model` to perform a `sparse_embedding` task type. Refer to the {ml-docs}/ml-nlp-elser.html[ELSER model documentation] for more info. +The request below will automatically download the ELSER model if it isn't +already downloaded and then deploy the model. + [source,console] ------------------------------------------------------------ PUT _inference/sparse_embedding/my-elser-model From 21d418f1e0eb1ee7e9a2502582cc234756915e66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 26 Jul 2024 12:26:03 +0200 Subject: [PATCH 121/406] [DOCS] Documents automatic text chunking behavior for semantic text. (#111331) (#111333) --- docs/reference/mapping/types/semantic-text.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index 6ee30e6b9f831..ece22fc08b00f 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -65,6 +65,9 @@ To allow for large amounts of text to be used in semantic search, `semantic_text Each chunk will include the text subpassage and the corresponding embedding generated from it. When querying, the individual passages will be automatically searched for each document, and the most relevant passage will be used to compute a score. +Documents are split into 250-word sections with a 100-word overlap so that each section shares 100 words with the previous section. +This overlap ensures continuity and prevents vital contextual information in the input text from being lost by a hard break. + [discrete] [[semantic-text-structure]] From c2b5ba7b6f25be1311714821ca3614c8c78dd56b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 26 Jul 2024 13:02:28 +0200 Subject: [PATCH 122/406] [DOCS] Clarify that inference ID cannot match model ID (#111310) (#111334) * Clarify that inference ID cannot match model ID * Update service-elasticsearch.asciidoc Co-authored-by: Pius --- docs/reference/inference/service-elasticsearch.asciidoc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index b568a4691a4bd..6fb0b4a38d0ef 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -123,16 +123,17 @@ The following example shows how to create an {infer} endpoint called [source,console] ------------------------------------------------------------ -PUT _inference/text_embedding/my-msmarco-minilm-model +PUT _inference/text_embedding/my-msmarco-minilm-model <1> { "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1, - "model_id": "msmarco-MiniLM-L12-cos-v5" <1> + "model_id": "msmarco-MiniLM-L12-cos-v5" <2> } } ------------------------------------------------------------ // TEST[skip:TBD] -<1> The `model_id` must be the ID of a text embedding model which has already been +<1> Provide a unique identifier for the inference endpoint. The `inference_id` must be unique and must not match the `model_id`.
+<2> The `model_id` must be the ID of a text embedding model which has already been {ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. From 691319eb8ceb1cff8463df36543fa06974a58365 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 26 Jul 2024 15:12:34 +0100 Subject: [PATCH 123/406] [DOCS] Update retriever reranker options (#111337) (#111349) * [DOCS] Update retriever reranker options * Fix typo --- docs/reference/search/retriever.asciidoc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index ed39ac786880b..b86339b905631 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -213,7 +213,13 @@ The `text_similarity_reranker` is a type of retriever that enhances search resul To use `text_similarity_reranker` you must first set up a `rerank` task using the <>. The `rerank` task should be set up with a machine learning model that can compute text similarity. -Currently you can integrate directly with the Cohere Rerank endpoint using the <> task, or upload a model to {es} <>. + +Currently you can: + +* Integrate directly with the <> using the `rerank` task type +* Integrate directly with the <> using the `rerank` task type +* Upload a model to {es} with {eland-docs}/machine-learning.html#ml-nlp-pytorch[Eland] +** Then set up an <> with the `rerank` task type ===== Parameters From 00bfd0286f7ea16b2300a7ae15f126140104101d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 26 Jul 2024 17:17:57 +0200 Subject: [PATCH 124/406] [DOCS] Clarifies semantic query behavior on sparse and dense vector fields (#111339) (#111355) * [DOCS] Clarifies semantic query behavior on sparse and dense vector fields. * [DOCS] Adds a NOTE to the semantic query docs. --- docs/reference/mapping/types/semantic-text.asciidoc | 7 ++++++- docs/reference/query-dsl/semantic-query.asciidoc | 4 ++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index ece22fc08b00f..1a4560e42db06 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -121,7 +121,12 @@ In case you want to customize data indexing, use the <> or <> field types and create an ingest pipeline with an <> to generate the embeddings. -<> walks you through the process. +<> walks you through the process. In +these cases - when you use `sparse_vector` or `dense_vector` field types instead +of the `semantic_text` field type to customize indexing - using the +<> is not supported for querying the +field data. + [discrete] [[update-script]] diff --git a/docs/reference/query-dsl/semantic-query.asciidoc b/docs/reference/query-dsl/semantic-query.asciidoc index d0eb2da95ebc6..22b5e6c5e6aad 100644 --- a/docs/reference/query-dsl/semantic-query.asciidoc +++ b/docs/reference/query-dsl/semantic-query.asciidoc @@ -128,6 +128,10 @@ If you want to fine-tune a search on a `semantic_text` field, you need to know t You can find the task type using the <>, and check the `task_type` associated with the {infer} service. Depending on the `task_type`, use either the <> or the <> query for greater flexibility and customization. 
+NOTE: While it is possible to use the `sparse_vector` query or the `knn` query +on a `semantic_text` field, it is not supported to use the `semantic_query` on a +`sparse_vector` or `dense_vector` field type. + [discrete] [[search-sparse-inference]] From 836b4a5bb2c535a4fb82335a1343ab0f0e928d97 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Fri, 26 Jul 2024 17:30:22 +0200 Subject: [PATCH 125/406] Clarify some semantic_text docs (#111329) (#111353) --- docs/reference/mapping/types/semantic-text.asciidoc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index 1a4560e42db06..522a0c54c8aad 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -52,8 +52,8 @@ Use the <> to create the endpoint. The `inference_id` will not be validated when the mapping is created, but when documents are ingested into the index. When the first document is indexed, the `inference_id` will be used to generate underlying indexing structures for the field. -WARNING: Removing an inference endpoint will cause ingestion of documents and semantic queries to fail on indices that define `semantic_text` fields with that inference endpoint as their `inference_id`. -Please check that inference endpoints are not used in `semantic_text` fields before removal. +WARNING: Removing an {infer} endpoint will cause ingestion of documents and semantic queries to fail on indices that define `semantic_text` fields with that {infer} endpoint as their `inference_id`. +Trying to <> that is used on a `semantic_text` field will result in an error. [discrete] [[auto-text-chunking]] @@ -132,7 +132,8 @@ field data. [[update-script]] ==== Updates to `semantic_text` fields -Updates that use scripts are not supported when the index contains a `semantic_text` field. +Updates that use scripts are not supported for an index that contains a `semantic_text` field. +Even if the script targets non-`semantic_text` fields, the update will fail when the index contains a `semantic_text` field. [discrete] From 010be8764fb929eaf572f3fb0e2b0ef8ef15ab15 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Fri, 26 Jul 2024 19:24:57 -0400 Subject: [PATCH 126/406] Fix geoip processor isp_organization_name property and docs (#111372) (#111374) --- docs/reference/ingest/processors/geoip.asciidoc | 4 ++-- .../java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java | 4 ++-- .../org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 738ac234d6162..230146d483144 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -64,12 +64,12 @@ depend on what has been found and which properties were configured in `properties. * If the GeoIP2 Domain database is used, then the following fields may be added under the `target_field`: `ip`, and `domain`. The fields actually added depend on what has been found and which properties were configured in `properties`. * If the GeoIP2 ISP database is used, then the following fields may be added under the `target_field`: `ip`, `asn`, -`organization_name`, `network`, `isp`, `isp_organization`, `mobile_country_code`, and `mobile_network_code`.
The fields actually added +`organization_name`, `network`, `isp`, `isp_organization_name`, `mobile_country_code`, and `mobile_network_code`. The fields actually added depend on what has been found and which properties were configured in `properties`. * If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`, `country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, `location`, `asn`, `organization_name`, `network`, `hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, -`residential_proxy`, `domain`, `isp`, `isp_organization`, `mobile_country_code`, `mobile_network_code`, `user_type`, and +`residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and `connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. preview::["Do not use the GeoIP2 Anonymous IP, GeoIP2 Connection Type, GeoIP2 Domain, GeoIP2 ISP, and GeoIP2 Enterprise databases in production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index e39705a71f56c..82b9e930280b7 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -592,7 +592,7 @@ private Map retrieveEnterpriseGeoData(GeoIpDatabase geoIpDatabas } case ISP_ORGANIZATION_NAME -> { if (ispOrganization != null) { - geoData.put("isp_organization", ispOrganization); + geoData.put("isp_organization_name", ispOrganization); } } case MOBILE_COUNTRY_CODE -> { @@ -660,7 +660,7 @@ private Map retrieveIspGeoData(GeoIpDatabase geoIpDatabase, Inet } case ISP_ORGANIZATION_NAME -> { if (ispOrganization != null) { - geoData.put("isp_organization", ispOrganization); + geoData.put("isp_organization_name", ispOrganization); } } case MOBILE_COUNTRY_CODE -> { diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java index 6276155d9f083..87d1881a9e743 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java @@ -463,7 +463,7 @@ public void testEnterprise() throws Exception { assertThat(geoData.get("residential_proxy"), equalTo(false)); assertThat(geoData.get("domain"), equalTo("frpt.net")); assertThat(geoData.get("isp"), equalTo("Fairpoint Communications")); - assertThat(geoData.get("isp_organization"), equalTo("Fairpoint Communications")); + assertThat(geoData.get("isp_organization_name"), equalTo("Fairpoint Communications")); assertThat(geoData.get("user_type"), equalTo("residential")); assertThat(geoData.get("connection_type"), equalTo("Cable/DSL")); } @@ -497,7 +497,7 @@ public void testIsp() throws Exception { assertThat(geoData.get("organization_name"), equalTo("CELLCO-PART")); assertThat(geoData.get("network"), 
equalTo("149.101.100.0/28")); assertThat(geoData.get("isp"), equalTo("Verizon Wireless")); - assertThat(geoData.get("isp_organization"), equalTo("Verizon Wireless")); + assertThat(geoData.get("isp_organization_name"), equalTo("Verizon Wireless")); assertThat(geoData.get("mobile_network_code"), equalTo("004")); assertThat(geoData.get("mobile_country_code"), equalTo("310")); } From 72cfb0b3b8c1472272f897c7c94c7d6abcbbdfca Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Mon, 29 Jul 2024 11:41:03 +0100 Subject: [PATCH 127/406] fix text_similarity_reranker doc (#111256) (#111401) Co-authored-by: weizijun --- docs/reference/search/retriever.asciidoc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index b86339b905631..0afe9f77286a8 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -262,13 +262,13 @@ GET /index/_search "text_similarity_reranker": { "retriever": { "standard": { ... } - } - }, - "field": "text", - "inference_id": "my-cohere-rerank-model", - "inference_text": "Most famous landmark in Paris", - "rank_window_size": 100, - "min_score": 0.5 + }, + "field": "text", + "inference_id": "my-cohere-rerank-model", + "inference_text": "Most famous landmark in Paris", + "rank_window_size": 100, + "min_score": 0.5 + } } } ---- From 6ee87471f8b5425177923fd5447b5cb0384f649e Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Mon, 29 Jul 2024 14:00:37 +0100 Subject: [PATCH 128/406] [DOCS] Additional reranking docs updates (#111350) (#111404) * Simplify overview, specify available rerank options * Update links * Clarify eland-uploaded models is for hugging face --- docs/reference/search/retriever.asciidoc | 5 ++++ .../semantic-reranking.asciidoc | 29 +++++++------------ 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index 0afe9f77286a8..1b7376c21daab 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -209,6 +209,11 @@ GET /index/_search The `text_similarity_reranker` is a type of retriever that enhances search results by re-ranking documents based on semantic similarity to a specified inference text, using a machine learning model. +[TIP] +==== +Refer to <> for a high level overview of semantic reranking. +==== + ===== Prerequisites To use `text_similarity_reranker` you must first set up a `rerank` task using the <>. diff --git a/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc b/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc index 75c06aa953302..f25741fca0b8f 100644 --- a/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc +++ b/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc @@ -5,7 +5,7 @@ preview::[] [TIP] ==== -This overview focuses more on the high-level concepts and use cases for semantic reranking. For full implementation details on how to set up and use semantic reranking in {es}, see the <> in the Search API docs. +This overview focuses more on the high-level concepts and use cases for semantic reranking. For full implementation details on how to set up and use semantic reranking in {es}, see the <> in the Search API docs. 
==== Rerankers improve the relevance of results from earlier-stage retrieval mechanisms. @@ -89,11 +89,16 @@ In {es}, semantic rerankers are implemented using the {es} <>. +. *Choose a reranking model*. +Currently you can: + +** Integrate directly with the <> using the `rerank` task type +** Integrate directly with the <> using the `rerank` task type +** Upload a model to {es} from Hugging Face with {eland-docs}/machine-learning.html#ml-nlp-pytorch[Eland] +*** Then set up an <> with the `rerank` task type +. *Create a `rerank` task using the <>*. The Inference API creates an inference endpoint and configures your chosen machine learning model to perform the reranking task. -. Define a `text_similarity_reranker` retriever in your search request. +. *Define a `text_similarity_reranker` retriever in your search request*. The retriever syntax makes it simple to configure both the retrieval and reranking of search results in a single API call. .*Example search request* with semantic reranker @@ -127,20 +132,6 @@ POST _search // TEST[skip:TBD] ============== -[discrete] -[[semantic-reranking-types]] -==== Supported reranking types - -The following `text_similarity_reranker` model configuration options are available. - -*Text similarity with cross-encoder* - -This solution uses a hosted or 3rd party inference service which relies on a cross-encoder model. -The model receives the text fields from the _top-K_ documents, as well as the search query, and calculates scores directly, which are then used to rerank the documents. - -Used with the Cohere inference service rolled out in 8.13, turn on semantic reranking that works out of the box. -Check out our https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations/cohere/cohere-elasticsearch.ipynb[Python notebook] for using Cohere with {es}. 
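For illustration, the "create a `rerank` task" step above could be satisfied with a Cohere-backed endpoint along these lines. The sketch reuses the `my-cohere-rerank-model` inference ID from the retriever example, while the model ID and API key are placeholders rather than values taken from this change:

[source,console]
----
PUT _inference/rerank/my-cohere-rerank-model
{
  "service": "cohere",
  "service_settings": {
    "api_key": "<your-cohere-api-key>",
    "model_id": "rerank-english-v3.0"
  }
}
----

Once such an endpoint exists, its ID is what the `text_similarity_reranker` retriever's `inference_id` parameter points at.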
- [discrete] [[semantic-reranking-learn-more]] ==== Learn more From fc352bd210a34c871e40f2d53a81d65705919d33 Mon Sep 17 00:00:00 2001 From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com> Date: Mon, 29 Jul 2024 13:35:48 -0400 Subject: [PATCH 129/406] [Inference API] Replace model_id with inference_id in inference API except when stored (#111366) (#111417) * Replace model_id with inference_id in inference API except when storing ModelConfigs * Update docs/changelog/111366.yaml * replace missed literals in tests --- docs/changelog/111366.yaml | 6 ++++++ .../inference/ModelConfigurations.java | 18 +++++++++++++++--- .../xpack/inference/InferenceCrudIT.java | 6 +++--- .../inference/MockDenseInferenceServiceIT.java | 2 +- .../MockSparseInferenceServiceIT.java | 6 +++--- .../inference/integration/ModelRegistryIT.java | 4 ++-- .../inference/registry/ModelRegistry.java | 10 ++++++++-- 7 files changed, 38 insertions(+), 14 deletions(-) create mode 100644 docs/changelog/111366.yaml diff --git a/docs/changelog/111366.yaml b/docs/changelog/111366.yaml new file mode 100644 index 0000000000000..9cb127077094f --- /dev/null +++ b/docs/changelog/111366.yaml @@ -0,0 +1,6 @@ +pr: 111366 +summary: "[Inference API] Replace `model_id` with `inference_id` in inference API\ + \ except when stored" +area: Machine Learning +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java b/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java index 4b6f436460fdc..0df0378c4a5f4 100644 --- a/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java +++ b/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java @@ -20,7 +20,11 @@ public class ModelConfigurations implements ToFilteredXContentObject, VersionedNamedWriteable { - public static final String MODEL_ID = "model_id"; + // Due to refactoring, we now have different field names for the inference ID when it is serialized and stored to an index vs when it + // is returned as part of a GetInferenceModelAction + public static final String INDEX_ONLY_ID_FIELD_NAME = "model_id"; + public static final String INFERENCE_ID_FIELD_NAME = "inference_id"; + public static final String USE_ID_FOR_INDEX = "for_index"; public static final String SERVICE = "service"; public static final String SERVICE_SETTINGS = "service_settings"; public static final String TASK_SETTINGS = "task_settings"; @@ -119,7 +123,11 @@ public TaskSettings getTaskSettings() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(MODEL_ID, inferenceEntityId); + if (params.paramAsBoolean(USE_ID_FOR_INDEX, false)) { + builder.field(INDEX_ONLY_ID_FIELD_NAME, inferenceEntityId); + } else { + builder.field(INFERENCE_ID_FIELD_NAME, inferenceEntityId); + } builder.field(TaskType.NAME, taskType.toString()); builder.field(SERVICE, service); builder.field(SERVICE_SETTINGS, serviceSettings); @@ -131,7 +139,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public XContentBuilder toFilteredXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(MODEL_ID, inferenceEntityId); + if (params.paramAsBoolean(USE_ID_FOR_INDEX, false)) { + builder.field(INDEX_ONLY_ID_FIELD_NAME, inferenceEntityId); + } else { + builder.field(INFERENCE_ID_FIELD_NAME, inferenceEntityId); + } builder.field(TaskType.NAME, taskType.toString()); 
builder.field(SERVICE, service); builder.field(SERVICE_SETTINGS, serviceSettings.getFilteredXContentObject()); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index 75e392b6d155f..2325e17c23944 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -48,7 +48,7 @@ public void testGet() throws IOException { var singleModel = getModels("se_model_1", TaskType.SPARSE_EMBEDDING); assertThat(singleModel, hasSize(1)); - assertEquals("se_model_1", singleModel.get(0).get("model_id")); + assertEquals("se_model_1", singleModel.get(0).get("inference_id")); for (int i = 0; i < 5; i++) { deleteModel("se_model_" + i, TaskType.SPARSE_EMBEDDING); @@ -81,7 +81,7 @@ public void testGetModelWithAnyTaskType() throws IOException { String inferenceEntityId = "sparse_embedding_model"; putModel(inferenceEntityId, mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); var singleModel = getModels(inferenceEntityId, TaskType.ANY); - assertEquals(inferenceEntityId, singleModel.get(0).get("model_id")); + assertEquals(inferenceEntityId, singleModel.get(0).get("inference_id")); assertEquals(TaskType.SPARSE_EMBEDDING.toString(), singleModel.get(0).get("task_type")); } @@ -90,7 +90,7 @@ public void testApisWithoutTaskType() throws IOException { String modelId = "no_task_type_in_url"; putModel(modelId, mockSparseServiceModelConfig(TaskType.SPARSE_EMBEDDING)); var singleModel = getModel(modelId); - assertEquals(modelId, singleModel.get("model_id")); + assertEquals(modelId, singleModel.get("inference_id")); assertEquals(TaskType.SPARSE_EMBEDDING.toString(), singleModel.get("task_type")); var inference = inferOnMockService(modelId, List.of(randomAlphaOfLength(10))); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockDenseInferenceServiceIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockDenseInferenceServiceIT.java index 833b1fd3673fc..5f6bad5687407 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockDenseInferenceServiceIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockDenseInferenceServiceIT.java @@ -22,7 +22,7 @@ public void testMockService() throws IOException { var model = getModels(inferenceEntityId, TaskType.TEXT_EMBEDDING).get(0); for (var modelMap : List.of(putModel, model)) { - assertEquals(inferenceEntityId, modelMap.get("model_id")); + assertEquals(inferenceEntityId, modelMap.get("inference_id")); assertEquals(TaskType.TEXT_EMBEDDING, TaskType.fromString((String) modelMap.get("task_type"))); assertEquals("text_embedding_test_service", modelMap.get("service")); } diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockSparseInferenceServiceIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockSparseInferenceServiceIT.java index 97e0641f37c33..24ba2708f5de4 
100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockSparseInferenceServiceIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/MockSparseInferenceServiceIT.java @@ -24,7 +24,7 @@ public void testMockService() throws IOException { var model = getModels(inferenceEntityId, TaskType.SPARSE_EMBEDDING).get(0); for (var modelMap : List.of(putModel, model)) { - assertEquals(inferenceEntityId, modelMap.get("model_id")); + assertEquals(inferenceEntityId, modelMap.get("inference_id")); assertEquals(TaskType.SPARSE_EMBEDDING, TaskType.fromString((String) modelMap.get("task_type"))); assertEquals("test_service", modelMap.get("service")); } @@ -77,7 +77,7 @@ public void testMockService_DoesNotReturnHiddenField_InModelResponses() throws I var model = getModels(inferenceEntityId, TaskType.SPARSE_EMBEDDING).get(0); for (var modelMap : List.of(putModel, model)) { - assertEquals(inferenceEntityId, modelMap.get("model_id")); + assertEquals(inferenceEntityId, modelMap.get("inference_id")); assertThat(modelMap.get("service_settings"), is(Map.of("model", "my_model"))); assertEquals(TaskType.SPARSE_EMBEDDING, TaskType.fromString((String) modelMap.get("task_type"))); assertEquals("test_service", modelMap.get("service")); @@ -95,7 +95,7 @@ public void testMockService_DoesReturnHiddenField_InModelResponses() throws IOEx var model = getModels(inferenceEntityId, TaskType.SPARSE_EMBEDDING).get(0); for (var modelMap : List.of(putModel, model)) { - assertEquals(inferenceEntityId, modelMap.get("model_id")); + assertEquals(inferenceEntityId, modelMap.get("inference_id")); assertThat(modelMap.get("service_settings"), is(Map.of("model", "my_model", "hidden_field", "my_hidden_value"))); assertEquals(TaskType.SPARSE_EMBEDDING, TaskType.fromString((String) modelMap.get("task_type"))); assertEquals("test_service", modelMap.get("service")); diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java index 776232f1e29e6..abe73e11a4873 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java @@ -405,7 +405,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("unknown_field", "foo"); - builder.field(MODEL_ID, getInferenceEntityId()); + builder.field(INDEX_ONLY_ID_FIELD_NAME, getInferenceEntityId()); builder.field(TaskType.NAME, getTaskType().toString()); builder.field(SERVICE, getService()); builder.field(SERVICE_SETTINGS, getServiceSettings()); @@ -431,7 +431,7 @@ private static class ModelWithUnknownField extends ModelConfigurations { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("unknown_field", "foo"); - builder.field(MODEL_ID, getInferenceEntityId()); + builder.field(INDEX_ONLY_ID_FIELD_NAME, getInferenceEntityId()); builder.field(TaskType.NAME, getTaskType().toString()); builder.field(SERVICE, getService()); builder.field(SERVICE_SETTINGS, 
getServiceSettings()); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/registry/ModelRegistry.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/registry/ModelRegistry.java index ae82264a77a0d..a6e4fcae7169f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/registry/ModelRegistry.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/registry/ModelRegistry.java @@ -74,7 +74,10 @@ public static UnparsedModel unparsedModelFromMap(ModelConfigMap modelConfigMap) if (modelConfigMap.config() == null) { throw new ElasticsearchStatusException("Missing config map", RestStatus.BAD_REQUEST); } - String inferenceEntityId = ServiceUtils.removeStringOrThrowIfNull(modelConfigMap.config(), ModelConfigurations.MODEL_ID); + String inferenceEntityId = ServiceUtils.removeStringOrThrowIfNull( + modelConfigMap.config(), + ModelConfigurations.INDEX_ONLY_ID_FIELD_NAME + ); String service = ServiceUtils.removeStringOrThrowIfNull(modelConfigMap.config(), ModelConfigurations.SERVICE); String taskTypeStr = ServiceUtils.removeStringOrThrowIfNull(modelConfigMap.config(), TaskType.NAME); TaskType taskType = TaskType.fromString(taskTypeStr); @@ -375,7 +378,10 @@ public void deleteModel(String inferenceEntityId, ActionListener listen private static IndexRequest createIndexRequest(String docId, String indexName, ToXContentObject body, boolean allowOverwriting) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { var request = new IndexRequest(indexName); - XContentBuilder source = body.toXContent(builder, ToXContent.EMPTY_PARAMS); + XContentBuilder source = body.toXContent( + builder, + new ToXContent.MapParams(Map.of(ModelConfigurations.USE_ID_FOR_INDEX, Boolean.TRUE.toString())) + ); var operation = allowOverwriting ? DocWriteRequest.OpType.INDEX : DocWriteRequest.OpType.CREATE; return request.opType(operation).id(docId).source(source); From e0040761ad66ff7ede2b220315d351d49f88d770 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 30 Jul 2024 04:16:38 +1000 Subject: [PATCH 130/406] Mute org.elasticsearch.xpack.transform.integration.TransformIT testStopWaitForCheckpoint #106113 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 629acf8a767b6..bda45fd178691 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -92,6 +92,9 @@ tests: - class: org.elasticsearch.packaging.test.DockerTests method: test600Interrupt issue: https://github.com/elastic/elasticsearch/issues/111324 +- class: org.elasticsearch.xpack.transform.integration.TransformIT + method: testStopWaitForCheckpoint + issue: https://github.com/elastic/elasticsearch/issues/106113 # Examples: # From d43021eb0094014684bd3d8920b81dbe2aecc862 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Mon, 29 Jul 2024 15:43:17 -0400 Subject: [PATCH 131/406] Remove 4096 bool query max limit from docs (#111421) (#111422) indices.query.bool.max_clause_count is set automatically and does not default to 4096 as before. This remove mentions of 4096 from query documentations. 
Relates to PR#91811 --- docs/reference/query-dsl/query-string-query.asciidoc | 4 ++-- docs/reference/query-dsl/span-multi-term-query.asciidoc | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index 319ede7c4ac05..b45247ace3735 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -30,7 +30,7 @@ If you don't need to support a query syntax, consider using the syntax, use the <> query, which is less strict. ==== - + [[query-string-query-ex-request]] ==== Example request @@ -83,7 +83,7 @@ could be expensive. There is a limit on the number of fields times terms that can be queried at once. It is defined by the `indices.query.bool.max_clause_count` -<>, which defaults to 4096. +<>. ==== -- diff --git a/docs/reference/query-dsl/span-multi-term-query.asciidoc b/docs/reference/query-dsl/span-multi-term-query.asciidoc index aefb3e4b75eb5..5a5f0e1f5ff99 100644 --- a/docs/reference/query-dsl/span-multi-term-query.asciidoc +++ b/docs/reference/query-dsl/span-multi-term-query.asciidoc @@ -39,7 +39,8 @@ GET /_search -------------------------------------------------- WARNING: `span_multi` queries will hit too many clauses failure if the number of terms that match the query exceeds the -boolean query limit (defaults to 4096).To avoid an unbounded expansion you can set the <>. +To avoid an unbounded expansion you can set the <> of the multi term query to `top_terms_*` rewrite. Or, if you use `span_multi` on `prefix` query only, you can activate the <> field option of the `text` field instead. This will rewrite any prefix query on the field to a single term query that matches the indexed prefix. 
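To make the rewrite advice above concrete, a `span_multi` query that caps term expansion with a `top_terms` rewrite might look like the following sketch; the field name, prefix value, and the specific `top_terms_boost_32` rewrite size are illustrative assumptions:

[source,console]
----
GET /_search
{
  "query": {
    "span_multi": {
      "match": {
        "prefix": {
          "user.id": {
            "value": "ki",
            "rewrite": "top_terms_boost_32"
          }
        }
      }
    }
  }
}
----

With a `top_terms_*` rewrite the multi-term query keeps only the highest-scoring expansions, so it stays under `indices.query.bool.max_clause_count` even on high-cardinality fields.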
From 3f30e388fcb85efc4a0eadc30f2030145e883401 Mon Sep 17 00:00:00 2001 From: James Baiera Date: Mon, 29 Jul 2024 17:27:37 -0400 Subject: [PATCH 132/406] Fix enrich policy runner exception handling on empty segments response (#111290) (#111371) * Fix enrich segment action listener exception logic * Update docs/changelog/111290.yaml Co-authored-by: Elastic Machine --- docs/changelog/111290.yaml | 5 + .../segments/IndicesSegmentResponse.java | 2 +- .../xpack/enrich/EnrichPolicyRunner.java | 104 +++++--- .../xpack/enrich/EnrichPolicyRunnerTests.java | 251 +++++++++++++++++- 4 files changed, 325 insertions(+), 37 deletions(-) create mode 100644 docs/changelog/111290.yaml diff --git a/docs/changelog/111290.yaml b/docs/changelog/111290.yaml new file mode 100644 index 0000000000000..efcb01a4aedf9 --- /dev/null +++ b/docs/changelog/111290.yaml @@ -0,0 +1,5 @@ +pr: 111290 +summary: Fix enrich policy runner exception handling on empty segments response +area: Ingest Node +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index bd12cfdbc7962..429ebe365bbe1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -36,7 +36,7 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse { private volatile Map indicesSegments; - IndicesSegmentResponse( + public IndicesSegmentResponse( ShardSegments[] shards, int totalShards, int successfulShards, diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java index 5cb9c0cf9c051..2ff4863a12b6e 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java @@ -25,11 +25,11 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.FilterClient; import org.elasticsearch.client.internal.OriginSettingClient; @@ -572,48 +572,82 @@ private void refreshEnrichIndex(final String destinationIndexName, final int att protected void ensureSingleSegment(final String destinationIndexName, final int attempt) { enrichOriginClient().admin() .indices() - .segments(new IndicesSegmentsRequest(destinationIndexName), new DelegatingActionListener<>(listener) { - @Override - public void onResponse(IndicesSegmentResponse indicesSegmentResponse) { - IndexSegments indexSegments = indicesSegmentResponse.getIndices().get(destinationIndexName); - if (indexSegments == null) { + .segments(new 
IndicesSegmentsRequest(destinationIndexName), listener.delegateFailureAndWrap((l, indicesSegmentResponse) -> { + int failedShards = indicesSegmentResponse.getFailedShards(); + if (failedShards > 0) { + // Encountered a problem while querying the segments for the enrich index. Try and surface the problem in the log. + logger.warn( + "Policy [{}]: Encountered [{}] shard level failures while querying the segments for enrich index [{}]. " + + "Turn on DEBUG logging for details.", + policyName, + failedShards, + enrichIndexName + ); + if (logger.isDebugEnabled()) { + DefaultShardOperationFailedException[] shardFailures = indicesSegmentResponse.getShardFailures(); + int failureNumber = 1; + String logPrefix = "Policy [" + policyName + "]: Encountered shard failure ["; + String logSuffix = " of " + + shardFailures.length + + "] while querying segments for enrich index [" + + enrichIndexName + + "]. Shard ["; + for (DefaultShardOperationFailedException shardFailure : shardFailures) { + logger.debug( + logPrefix + failureNumber + logSuffix + shardFailure.index() + "][" + shardFailure.shardId() + "]", + shardFailure.getCause() + ); + failureNumber++; + } + } + } + IndexSegments indexSegments = indicesSegmentResponse.getIndices().get(destinationIndexName); + if (indexSegments == null) { + if (indicesSegmentResponse.getShardFailures().length == 0) { throw new ElasticsearchException( "Could not locate segment information for newly created index [{}]", destinationIndexName ); + } else { + DefaultShardOperationFailedException shardFailure = indicesSegmentResponse.getShardFailures()[0]; + throw new ElasticsearchException( + "Could not obtain segment information for newly created index [{}]; shard info [{}][{}]", + shardFailure.getCause(), + destinationIndexName, + shardFailure.index(), + shardFailure.shardId() + ); } - Map indexShards = indexSegments.getShards(); - assert indexShards.size() == 1 : "Expected enrich index to contain only one shard"; - ShardSegments[] shardSegments = indexShards.get(0).shards(); - assert shardSegments.length == 1 : "Expected enrich index to contain no replicas at this point"; - ShardSegments primarySegments = shardSegments[0]; - if (primarySegments.getSegments().size() > 1) { - int nextAttempt = attempt + 1; - if (nextAttempt > maxForceMergeAttempts) { - delegate.onFailure( - new ElasticsearchException( - "Force merging index [{}] attempted [{}] times but did not result in one segment.", - destinationIndexName, - attempt, - maxForceMergeAttempts - ) - ); - } else { - logger.debug( - "Policy [{}]: Force merge result contains more than one segment [{}], retrying (attempt {}/{})", - policyName, - primarySegments.getSegments().size(), - nextAttempt, - maxForceMergeAttempts - ); - forceMergeEnrichIndex(destinationIndexName, nextAttempt); - } + } + Map indexShards = indexSegments.getShards(); + assert indexShards.size() == 1 : "Expected enrich index to contain only one shard"; + ShardSegments[] shardSegments = indexShards.get(0).shards(); + assert shardSegments.length == 1 : "Expected enrich index to contain no replicas at this point"; + ShardSegments primarySegments = shardSegments[0]; + if (primarySegments.getSegments().size() > 1) { + int nextAttempt = attempt + 1; + if (nextAttempt > maxForceMergeAttempts) { + throw new ElasticsearchException( + "Force merging index [{}] attempted [{}] times but did not result in one segment.", + destinationIndexName, + attempt, + maxForceMergeAttempts + ); } else { - // Force merge down to one segment successful - 
setIndexReadOnly(destinationIndexName); + logger.debug( + "Policy [{}]: Force merge result contains more than one segment [{}], retrying (attempt {}/{})", + policyName, + primarySegments.getSegments().size(), + nextAttempt, + maxForceMergeAttempts + ); + forceMergeEnrichIndex(destinationIndexName, nextAttempt); } + } else { + // Force merge down to one segment successful + setIndexReadOnly(destinationIndexName); } - }); + })); } private void setIndexReadOnly(final String destinationIndexName) { diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java index 8ce1e7f350ccb..7ba3b356d6015 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; @@ -2048,6 +2049,254 @@ protected void ensureSingleSegment(String destinationIndexName, int attempt) { ensureEnrichIndexIsReadOnly(createdEnrichIndex); } + public void testRunnerWithEmptySegmentsResponse() throws Exception { + final String sourceIndex = "source-index"; + DocWriteResponse indexRequest = client().index(new IndexRequest().index(sourceIndex).id("id").source(""" + { + "field1": "value1", + "field2": 2, + "field3": "ignored", + "field4": "ignored", + "field5": "value5" + }""", XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).actionGet(); + assertEquals(RestStatus.CREATED, indexRequest.status()); + + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); + assertThat(sourceDocMap.get("field2"), is(equalTo(2))); + assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field5"), is(equalTo("value5"))); + } + ); + List enrichFields = List.of("field2", "field5"); + EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(sourceIndex), "field1", enrichFields); + String policyName = "test1"; + + final long createTime = randomNonNegativeLong(); + String createdEnrichIndex = ".enrich-test1-" + createTime; + final AtomicReference exception = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + ActionListener listener = createTestListener(latch, exception::set); + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + IndexNameExpressionResolver resolver = getInstanceFromNode(IndexNameExpressionResolver.class); + Task asyncTask = testTaskManager.register("enrich", "policy_execution", new TaskAwareRequest() { + @Override + public void 
setParentTask(TaskId taskId) {} + + @Override + public void setRequestId(long requestId) {} + + @Override + public TaskId getParentTask() { + return TaskId.EMPTY_TASK_ID; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new ExecuteEnrichPolicyTask(id, type, action, getDescription(), parentTaskId, headers); + } + + @Override + public String getDescription() { + return policyName; + } + }); + ExecuteEnrichPolicyTask task = ((ExecuteEnrichPolicyTask) asyncTask); + // The executor would wrap the listener in order to clean up the task in the + // task manager, but we're just testing the runner, so we make sure to clean + // up after ourselves. + ActionListener wrappedListener = ActionListener.runBefore( + listener, + () -> testTaskManager.unregister(task) + ); + + // Wrap the client so that when we receive the indices segments action, we intercept the request and complete it on another thread + // with an empty segments response. + Client client = new FilterClient(client()) { + @Override + protected void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + if (action.equals(IndicesSegmentsAction.INSTANCE)) { + testThreadPool.generic().execute(() -> { + @SuppressWarnings("unchecked") + ActionListener castListener = ((ActionListener) listener); + castListener.onResponse(new IndicesSegmentResponse(new ShardSegments[0], 0, 0, 0, List.of())); + }); + } else { + super.doExecute(action, request, listener); + } + } + }; + + EnrichPolicyRunner enrichPolicyRunner = new EnrichPolicyRunner( + policyName, + policy, + task, + wrappedListener, + clusterService, + getInstanceFromNode(IndicesService.class), + client, + resolver, + createdEnrichIndex, + randomIntBetween(1, 10000), + randomIntBetween(3, 10) + ); + + logger.info("Starting policy run"); + enrichPolicyRunner.run(); + if (latch.await(1, TimeUnit.MINUTES) == false) { + fail("Timeout while waiting for runner to complete"); + } + Exception exceptionThrown = exception.get(); + if (exceptionThrown == null) { + fail("Expected exception to be thrown from segment api"); + } + + // Validate exception information + assertThat(exceptionThrown, instanceOf(ElasticsearchException.class)); + assertThat(exceptionThrown.getMessage(), containsString("Could not locate segment information for newly created index")); + } + + public void testRunnerWithShardFailuresInSegmentResponse() throws Exception { + final String sourceIndex = "source-index"; + DocWriteResponse indexRequest = client().index(new IndexRequest().index(sourceIndex).id("id").source(""" + { + "field1": "value1", + "field2": 2, + "field3": "ignored", + "field4": "ignored", + "field5": "value5" + }""", XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).actionGet(); + assertEquals(RestStatus.CREATED, indexRequest.status()); + + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + assertThat(sourceDocMap.get("field1"), is(equalTo("value1"))); + assertThat(sourceDocMap.get("field2"), is(equalTo(2))); + assertThat(sourceDocMap.get("field3"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field4"), is(equalTo("ignored"))); + assertThat(sourceDocMap.get("field5"), 
is(equalTo("value5"))); + } + ); + List enrichFields = List.of("field2", "field5"); + EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(sourceIndex), "field1", enrichFields); + String policyName = "test1"; + + final long createTime = randomNonNegativeLong(); + String createdEnrichIndex = ".enrich-test1-" + createTime; + final AtomicReference exception = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + ActionListener listener = createTestListener(latch, exception::set); + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + IndexNameExpressionResolver resolver = getInstanceFromNode(IndexNameExpressionResolver.class); + Task asyncTask = testTaskManager.register("enrich", "policy_execution", new TaskAwareRequest() { + @Override + public void setParentTask(TaskId taskId) {} + + @Override + public void setRequestId(long requestId) {} + + @Override + public TaskId getParentTask() { + return TaskId.EMPTY_TASK_ID; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new ExecuteEnrichPolicyTask(id, type, action, getDescription(), parentTaskId, headers); + } + + @Override + public String getDescription() { + return policyName; + } + }); + ExecuteEnrichPolicyTask task = ((ExecuteEnrichPolicyTask) asyncTask); + // The executor would wrap the listener in order to clean up the task in the + // task manager, but we're just testing the runner, so we make sure to clean + // up after ourselves. + ActionListener wrappedListener = ActionListener.runBefore( + listener, + () -> testTaskManager.unregister(task) + ); + + // Wrap the client so that when we receive the indices segments action, we intercept the request and complete it on another thread + // with an failed segments response. 
+ Client client = new FilterClient(client()) { + @Override + protected void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + if (action.equals(IndicesSegmentsAction.INSTANCE)) { + testThreadPool.generic().execute(() -> { + @SuppressWarnings("unchecked") + ActionListener castListener = ((ActionListener) listener); + castListener.onResponse( + new IndicesSegmentResponse( + new ShardSegments[0], + 0, + 0, + 3, + List.of( + new DefaultShardOperationFailedException(createdEnrichIndex, 1, new ElasticsearchException("failure1")), + new DefaultShardOperationFailedException(createdEnrichIndex, 2, new ElasticsearchException("failure2")), + new DefaultShardOperationFailedException(createdEnrichIndex, 3, new ElasticsearchException("failure3")) + ) + ) + ); + }); + } else { + super.doExecute(action, request, listener); + } + } + }; + + EnrichPolicyRunner enrichPolicyRunner = new EnrichPolicyRunner( + policyName, + policy, + task, + wrappedListener, + clusterService, + getInstanceFromNode(IndicesService.class), + client, + resolver, + createdEnrichIndex, + randomIntBetween(1, 10000), + randomIntBetween(3, 10) + ); + + logger.info("Starting policy run"); + enrichPolicyRunner.run(); + if (latch.await(1, TimeUnit.MINUTES) == false) { + fail("Timeout while waiting for runner to complete"); + } + Exception exceptionThrown = exception.get(); + if (exceptionThrown == null) { + fail("Expected exception to be thrown from segment api"); + } + + // Validate exception information + assertThat(exceptionThrown, instanceOf(ElasticsearchException.class)); + assertThat(exceptionThrown.getMessage(), containsString("Could not obtain segment information for newly created index")); + assertThat(exceptionThrown.getCause(), instanceOf(ElasticsearchException.class)); + assertThat(exceptionThrown.getCause().getMessage(), containsString("failure1")); + } + public void testRunnerCancel() throws Exception { final String sourceIndex = "source-index"; DocWriteResponse indexRequest = client().index(new IndexRequest().index(sourceIndex).id("id").source(""" @@ -2495,7 +2744,7 @@ private ActionListener createTestListener( final CountDownLatch latch, final Consumer exceptionConsumer ) { - return new LatchedActionListener<>(ActionListener.wrap((r) -> logger.info("Run complete"), exceptionConsumer), latch); + return new LatchedActionListener<>(ActionListener.wrap((r) -> logger.debug("Run complete"), exceptionConsumer), latch); } private void validateMappingMetadata(Map mapping, String policyName, EnrichPolicy policy) { From 155eee6822e5f28808a293ac92bdfa4f3f9afd36 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 29 Jul 2024 18:25:45 -0400 Subject: [PATCH 133/406] [8.15] Ensure vector similarity correctly limits inner_hits returned for nested kNN (#111363) (#111426) * Ensure vector similarity correctly limits inner_hits returned for nested kNN (#111363) For nested kNN we support not only similarity thresholds, but also multi-passage search while retrieving more than one nearest passage. However, the inner_hits retrieved for the kNN search would ignore the restricted similarity. Meaning, the inner hits would return all passages, not just the ones within the limited similarity and this is confusing. 
closes: https://github.com/elastic/elasticsearch/issues/111093 (cherry picked from commit 69c96974de548ee3bfbfed482f0c205e18d42c8d) * fixing for backport * adj for backport * fix compilation for tests --- docs/changelog/111363.yaml | 6 +++ .../search.vectors/100_knn_nested_search.yml | 50 +++++++++++++++++++ .../org/elasticsearch/TransportVersions.java | 1 + .../action/search/DfsQueryPhase.java | 3 +- .../vectors/DenseVectorFieldMapper.java | 8 ++- .../search/vectors/ExactKnnQueryBuilder.java | 35 +++++++------ .../vectors/KnnScoreDocQueryBuilder.java | 37 ++++++++------ .../search/vectors/KnnSearchBuilder.java | 4 ++ .../search/vectors/KnnVectorQueryBuilder.java | 2 +- .../action/search/DfsQueryPhaseTests.java | 7 ++- .../vectors/DenseVectorFieldTypeTests.java | 4 +- ...AbstractKnnVectorQueryBuilderTestCase.java | 17 +++++++ .../vectors/ExactKnnQueryBuilderTests.java | 23 ++++++++- .../vectors/KnnScoreDocQueryBuilderTests.java | 27 +++++++--- 14 files changed, 180 insertions(+), 44 deletions(-) create mode 100644 docs/changelog/111363.yaml diff --git a/docs/changelog/111363.yaml b/docs/changelog/111363.yaml new file mode 100644 index 0000000000000..2cb3c5342ea5c --- /dev/null +++ b/docs/changelog/111363.yaml @@ -0,0 +1,6 @@ +pr: 111363 +summary: Ensure vector similarity correctly limits `inner_hits` returned for nested + kNN +area: Vector Search +type: bug +issues: [111093] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml index 72c6abab22600..d255a644183dc 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml @@ -411,3 +411,53 @@ setup: - match: {hits.total.value: 1} - match: {hits.hits.0._id: "2"} +--- +"nested Knn search with required similarity appropriately filters inner_hits": + - requires: + cluster_features: "gte_v8.15.0" + reason: 'bugfix for 8.15' + + - do: + search: + index: test + body: + query: + nested: + path: nested + inner_hits: + size: 3 + _source: false + fields: + - nested.paragraph_id + query: + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + num_candidates: 3 + similarity: 10.5 + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "2"} + - length: {hits.hits.0.inner_hits.nested.hits.hits: 1} + - match: {hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0"} + + - do: + search: + index: test + body: + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + num_candidates: 3 + k: 3 + similarity: 10.5 + inner_hits: + size: 3 + _source: false + fields: + - nested.paragraph_id + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "2"} + - length: {hits.hits.0.inner_hits.nested.hits.hits: 1} + - match: {hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0"} diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 7ac9b1a3f8013..9d57c50b17aaf 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -210,6 +210,7 @@ static TransportVersion def(int id) { public static final TransportVersion VERSIONED_MASTER_NODE_REQUESTS = def(8_701_00_0); public static 
final TransportVersion ML_INFERENCE_AMAZON_BEDROCK_ADDED = def(8_702_00_0); public static final TransportVersion ENTERPRISE_GEOIP_DOWNLOADER_BACKPORT_8_15 = def(8_702_00_1); + public static final TransportVersion FIX_VECTOR_SIMILARITY_INNER_HITS_BACKPORT_8_15 = def(8_702_00_2); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 9ddac7f13eb51..7a33eaa59eb03 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -155,7 +155,8 @@ ShardSearchRequest rewriteShardSearchRequest(ShardSearchRequest request) { QueryBuilder query = new KnnScoreDocQueryBuilder( scoreDocs.toArray(Lucene.EMPTY_SCORE_DOCS), source.knnSearch().get(i).getField(), - source.knnSearch().get(i).getQueryVector() + source.knnSearch().get(i).getQueryVector(), + source.knnSearch().get(i).getSimilarity() ).boost(source.knnSearch().get(i).boost()).queryName(source.knnSearch().get(i).queryName()); if (nestedPath != null) { query = new NestedQueryBuilder(nestedPath, query, ScoreMode.Max).innerHit(source.knnSearch().get(i).innerHit()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index d27c0acdb6b2e..b9912b3e097a2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -1711,17 +1711,21 @@ public Query termQuery(Object value, SearchExecutionContext context) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support term queries"); } - public Query createExactKnnQuery(VectorData queryVector) { + public Query createExactKnnQuery(VectorData queryVector, Float vectorSimilarity) { if (isIndexed() == false) { throw new IllegalArgumentException( "to perform knn search on field [" + name() + "], its mapping must have [index] set to [true]" ); } - return switch (elementType) { + Query knnQuery = switch (elementType) { case BYTE -> createExactKnnByteQuery(queryVector.asByteVector()); case FLOAT -> createExactKnnFloatQuery(queryVector.asFloatVector()); case BIT -> createExactKnnBitQuery(queryVector.asByteVector()); }; + if (vectorSimilarity != null) { + knnQuery = new VectorSimilarityQuery(knnQuery, vectorSimilarity, similarity.score(vectorSimilarity, elementType, dims)); + } + return knnQuery; } private Query createExactKnnBitQuery(byte[] queryVector) { diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java index 1f05b215699b1..7e23075b1932e 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java @@ -32,6 +32,7 @@ public class ExactKnnQueryBuilder extends AbstractQueryBuilder rewrittenQueries = new ArrayList<>(filterQueries.size()); diff --git a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java index e9ff8336ef4c9..511a7c90dbedc 100644 --- 
a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.rank.TestRankBuilder; import org.elasticsearch.search.vectors.KnnScoreDocQueryBuilder; import org.elasticsearch.search.vectors.KnnSearchBuilder; +import org.elasticsearch.search.vectors.VectorData; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; import org.elasticsearch.transport.Transport; @@ -351,12 +352,14 @@ public void testRewriteShardSearchRequestWithRank() { KnnScoreDocQueryBuilder ksdqb0 = new KnnScoreDocQueryBuilder( new ScoreDoc[] { new ScoreDoc(1, 3.0f, 1), new ScoreDoc(4, 1.5f, 1) }, "vector", - new float[] { 0.0f } + VectorData.fromFloats(new float[] { 0.0f }), + null ); KnnScoreDocQueryBuilder ksdqb1 = new KnnScoreDocQueryBuilder( new ScoreDoc[] { new ScoreDoc(1, 2.0f, 1) }, "vector2", - new float[] { 0.0f } + VectorData.fromFloats(new float[] { 0.0f }), + null ); assertEquals( List.of(bm25, ksdqb0, ksdqb1), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java index 2a4554091dc91..27dbc79333c2d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java @@ -215,7 +215,7 @@ public void testExactKnnQuery() { for (int i = 0; i < dims; i++) { queryVector[i] = randomFloat(); } - Query query = field.createExactKnnQuery(VectorData.fromFloats(queryVector)); + Query query = field.createExactKnnQuery(VectorData.fromFloats(queryVector), null); assertTrue(query instanceof DenseVectorQuery.Floats); } { @@ -233,7 +233,7 @@ public void testExactKnnQuery() { for (int i = 0; i < dims; i++) { queryVector[i] = randomByte(); } - Query query = field.createExactKnnQuery(VectorData.fromBytes(queryVector)); + Query query = field.createExactKnnQuery(VectorData.fromBytes(queryVector), null); assertTrue(query instanceof DenseVectorQuery.Bytes); } } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java index f5d9f35e34695..0cc151a16e4b7 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.index.query.InnerHitsRewriteContext; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -296,6 +297,22 @@ private void assertBWCSerialization(QueryBuilder newQuery, QueryBuilder bwcQuery } } + public void testRewriteForInnerHits() throws IOException { + SearchExecutionContext context = createSearchExecutionContext(); + InnerHitsRewriteContext innerHitsRewriteContext = new InnerHitsRewriteContext(context.getParserConfig(), System::currentTimeMillis); + KnnVectorQueryBuilder queryBuilder = createTestQueryBuilder(); + 
queryBuilder.boost(randomFloat()); + queryBuilder.queryName(randomAlphaOfLength(10)); + QueryBuilder rewritten = queryBuilder.rewrite(innerHitsRewriteContext); + assertTrue(rewritten instanceof ExactKnnQueryBuilder); + ExactKnnQueryBuilder exactKnnQueryBuilder = (ExactKnnQueryBuilder) rewritten; + assertEquals(queryBuilder.queryVector(), exactKnnQueryBuilder.getQuery()); + assertEquals(queryBuilder.getFieldName(), exactKnnQueryBuilder.getField()); + assertEquals(queryBuilder.boost(), exactKnnQueryBuilder.boost(), 0.0001f); + assertEquals(queryBuilder.queryName(), exactKnnQueryBuilder.queryName()); + assertEquals(queryBuilder.getVectorSimilarity(), exactKnnQueryBuilder.vectorSimilarity()); + } + public void testRewriteWithQueryVectorBuilder() throws Exception { int dims = randomInt(1024); float[] expectedArray = new float[dims]; diff --git a/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java index 627f8a184a147..220682f11de01 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java @@ -62,12 +62,12 @@ protected ExactKnnQueryBuilder doCreateTestQueryBuilder() { for (int i = 0; i < VECTOR_DIMENSION; i++) { query[i] = randomFloat(); } - return new ExactKnnQueryBuilder(query, VECTOR_FIELD); + return new ExactKnnQueryBuilder(VectorData.fromFloats(query), VECTOR_FIELD, randomBoolean() ? randomFloat() : null); } @Override public void testValidOutput() { - ExactKnnQueryBuilder query = new ExactKnnQueryBuilder(new float[] { 1.0f, 2.0f, 3.0f }, "field"); + ExactKnnQueryBuilder query = new ExactKnnQueryBuilder(VectorData.fromFloats(new float[] { 1.0f, 2.0f, 3.0f }), "field", null); String expected = """ { "exact_knn" : { @@ -80,10 +80,29 @@ public void testValidOutput() { } }"""; assertEquals(expected, query.toString()); + query = new ExactKnnQueryBuilder(VectorData.fromFloats(new float[] { 1.0f, 2.0f, 3.0f }), "field", 1f); + expected = """ + { + "exact_knn" : { + "query" : [ + 1.0, + 2.0, + 3.0 + ], + "field" : "field", + "similarity" : 1.0 + } + }"""; + assertEquals(expected, query.toString()); } @Override protected void doAssertLuceneQuery(ExactKnnQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException { + if (queryBuilder.vectorSimilarity() != null) { + assertTrue(query instanceof VectorSimilarityQuery); + VectorSimilarityQuery vectorSimilarityQuery = (VectorSimilarityQuery) query; + query = vectorSimilarityQuery.getInnerKnnQuery(); + } assertTrue(query instanceof DenseVectorQuery.Floats); DenseVectorQuery.Floats denseVectorQuery = (DenseVectorQuery.Floats) query; assertEquals(VECTOR_FIELD, denseVectorQuery.field); diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java index 67bc6bde9c1af..3733c884c401b 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java @@ -64,7 +64,8 @@ protected KnnScoreDocQueryBuilder doCreateTestQueryBuilder() { return new KnnScoreDocQueryBuilder( scoreDocs.toArray(new ScoreDoc[0]), randomBoolean() ? "field" : null, - randomBoolean() ? randomVector(10) : null + randomBoolean() ? 
VectorData.fromFloats(randomVector(10)) : null, + randomBoolean() ? randomFloat() : null ); } @@ -73,7 +74,8 @@ public void testValidOutput() { KnnScoreDocQueryBuilder query = new KnnScoreDocQueryBuilder( new ScoreDoc[] { new ScoreDoc(0, 4.25f), new ScoreDoc(5, 1.6f) }, "field", - new float[] { 1.0f, 2.0f } + VectorData.fromFloats(new float[] { 1.0f, 2.0f }), + null ); String expected = """ { @@ -163,7 +165,8 @@ public void testRewriteToMatchNone() throws IOException { KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder( new ScoreDoc[0], randomBoolean() ? "field" : null, - randomBoolean() ? randomVector(10) : null + randomBoolean() ? VectorData.fromFloats(randomVector(10)) : null, + randomBoolean() ? randomFloat() : null ); QueryRewriteContext context = randomBoolean() ? new InnerHitsRewriteContext(createSearchExecutionContext().getParserConfig(), System::currentTimeMillis) @@ -177,7 +180,8 @@ public void testRewriteForInnerHits() throws IOException { KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder( new ScoreDoc[] { new ScoreDoc(0, 4.25f), new ScoreDoc(5, 1.6f) }, randomAlphaOfLength(10), - randomVector(10) + VectorData.fromFloats(randomVector(10)), + randomBoolean() ? randomFloat() : null ); queryBuilder.boost(randomFloat()); queryBuilder.queryName(randomAlphaOfLength(10)); @@ -188,6 +192,7 @@ public void testRewriteForInnerHits() throws IOException { assertEquals(queryBuilder.fieldName(), exactKnnQueryBuilder.getField()); assertEquals(queryBuilder.boost(), exactKnnQueryBuilder.boost(), 0.0001f); assertEquals(queryBuilder.queryName(), exactKnnQueryBuilder.queryName()); + assertEquals(queryBuilder.vectorSimilarity(), exactKnnQueryBuilder.vectorSimilarity()); } @Override @@ -226,7 +231,12 @@ public void testScoreDocQueryWeightCount() throws IOException { } ScoreDoc[] scoreDocs = scoreDocsList.toArray(new ScoreDoc[0]); - KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(scoreDocs, "field", randomVector(10)); + KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder( + scoreDocs, + "field", + VectorData.fromFloats(randomVector(10)), + null + ); Query query = queryBuilder.doToQuery(context); final Weight w = query.createWeight(searcher, ScoreMode.TOP_SCORES, 1.0f); for (LeafReaderContext leafReaderContext : searcher.getLeafContexts()) { @@ -269,7 +279,12 @@ public void testScoreDocQuery() throws IOException { } ScoreDoc[] scoreDocs = scoreDocsList.toArray(new ScoreDoc[0]); - KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(scoreDocs, "field", randomVector(10)); + KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder( + scoreDocs, + "field", + VectorData.fromFloats(randomVector(10)), + null + ); final Query query = queryBuilder.doToQuery(context); final Weight w = query.createWeight(searcher, ScoreMode.TOP_SCORES, 1.0f); From fd0251983ccff8b8cf1055f66d2b1be8be269784 Mon Sep 17 00:00:00 2001 From: Adam Demjen Date: Tue, 30 Jul 2024 09:05:06 -0400 Subject: [PATCH 134/406] Fix score count validation in reranker response (#111424) * Fix score count validation in reranker response (backport) * Update docs/changelog/111424.yaml * Delete docs/changelog/111212.yaml * Use constructor that exists * Update 111424.yaml --- docs/changelog/111424.yaml | 6 ++ ...ankFeaturePhaseRankCoordinatorContext.java | 96 +++++++++++++------ ...aturePhaseRankCoordinatorContextTests.java | 7 +- .../TextSimilarityRankTests.java | 70 +++++++++++--- .../TextSimilarityTestPlugin.java | 64 +++++++++++-- 5 files changed, 189 
insertions(+), 54 deletions(-) create mode 100644 docs/changelog/111424.yaml diff --git a/docs/changelog/111424.yaml b/docs/changelog/111424.yaml new file mode 100644 index 0000000000000..386289557616f --- /dev/null +++ b/docs/changelog/111424.yaml @@ -0,0 +1,6 @@ +pr: 111424 +summary: Fix score count validation in reranker response +area: Ranking +type: bug +issues: + - 111202 diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java index a22126439e9e2..42413c35fcbff 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java @@ -14,8 +14,11 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; import org.elasticsearch.search.rank.feature.RankFeatureDoc; +import org.elasticsearch.xpack.core.inference.action.GetInferenceModelAction; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; +import org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankTaskSettings; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankTaskSettings; import java.util.Arrays; import java.util.Comparator; @@ -53,24 +56,77 @@ public TextSimilarityRankFeaturePhaseRankCoordinatorContext( protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { // Wrap the provided rankListener to an ActionListener that would handle the response from the inference service // and then pass the results - final ActionListener actionListener = scoreListener.delegateFailureAndWrap((l, r) -> { - float[] scores = extractScoresFromResponse(r); - if (scores.length != featureDocs.length) { + final ActionListener inferenceListener = scoreListener.delegateFailureAndWrap((l, r) -> { + InferenceServiceResults results = r.getResults(); + assert results instanceof RankedDocsResults; + + // Ensure we get exactly as many scores as the number of docs we passed, otherwise we may return incorrect results + List rankedDocs = ((RankedDocsResults) results).getRankedDocs(); + if (rankedDocs.size() != featureDocs.length) { l.onFailure( - new IllegalStateException("Document and score count mismatch: [" + featureDocs.length + "] vs [" + scores.length + "]") + new IllegalStateException( + "Reranker input document count and returned score count mismatch: [" + + featureDocs.length + + "] vs [" + + rankedDocs.size() + + "]" + ) ); } else { + float[] scores = extractScoresFromRankedDocs(rankedDocs); l.onResponse(scores); } }); - List featureData = Arrays.stream(featureDocs).map(x -> x.featureData).toList(); - InferenceAction.Request request = generateRequest(featureData); - try { - client.execute(InferenceAction.INSTANCE, request, actionListener); - } finally { - request.decRef(); - } + // top N listener + ActionListener topNListener = scoreListener.delegateFailureAndWrap((l, r) -> { + // The rerank inference endpoint may have an override to return top N documents only, in that case let's fail fast to avoid + 
// assigning scores to the wrong input + Integer configuredTopN = null; + if (r.getEndpoints().isEmpty() == false + && r.getEndpoints().get(0).getTaskSettings() instanceof CohereRerankTaskSettings cohereTaskSettings) { + configuredTopN = cohereTaskSettings.getTopNDocumentsOnly(); + } else if (r.getEndpoints().isEmpty() == false + && r.getEndpoints().get(0).getTaskSettings() instanceof GoogleVertexAiRerankTaskSettings googleVertexAiTaskSettings) { + configuredTopN = googleVertexAiTaskSettings.topN(); + } + if (configuredTopN != null && configuredTopN < rankWindowSize) { + l.onFailure( + new IllegalArgumentException( + "Inference endpoint [" + + inferenceId + + "] is configured to return the top [" + + configuredTopN + + "] results, but rank_window_size is [" + + rankWindowSize + + "]. Reduce rank_window_size to be less than or equal to the configured top N value." + ) + ); + return; + } + List featureData = Arrays.stream(featureDocs).map(x -> x.featureData).toList(); + InferenceAction.Request inferenceRequest = generateRequest(featureData); + try { + client.execute(InferenceAction.INSTANCE, inferenceRequest, inferenceListener); + } finally { + inferenceRequest.decRef(); + } + }); + + GetInferenceModelAction.Request getModelRequest = new GetInferenceModelAction.Request(inferenceId, TaskType.RERANK); + client.execute(GetInferenceModelAction.INSTANCE, getModelRequest, topNListener); + } + + /** + * Sorts documents by score descending and discards those with a score less than minScore. + * @param originalDocs documents to process + */ + @Override + protected RankFeatureDoc[] preprocess(RankFeatureDoc[] originalDocs) { + return Arrays.stream(originalDocs) + .filter(doc -> minScore == null || doc.score >= minScore) + .sorted(Comparator.comparing((RankFeatureDoc doc) -> doc.score).reversed()) + .toArray(RankFeatureDoc[]::new); } protected InferenceAction.Request generateRequest(List docFeatures) { @@ -85,11 +141,7 @@ protected InferenceAction.Request generateRequest(List docFeatures) { ); } - private float[] extractScoresFromResponse(InferenceAction.Response response) { - InferenceServiceResults results = response.getResults(); - assert results instanceof RankedDocsResults; - - List rankedDocs = ((RankedDocsResults) results).getRankedDocs(); + private float[] extractScoresFromRankedDocs(List rankedDocs) { float[] scores = new float[rankedDocs.size()]; for (RankedDocsResults.RankedDoc rankedDoc : rankedDocs) { scores[rankedDoc.index()] = rankedDoc.relevanceScore(); @@ -97,16 +149,4 @@ private float[] extractScoresFromResponse(InferenceAction.Response response) { return scores; } - - /** - * Sorts documents by score descending and discards those with a score less than minScore. 
- * @param originalDocs documents to process - */ - @Override - protected RankFeatureDoc[] preprocess(RankFeatureDoc[] originalDocs) { - return Arrays.stream(originalDocs) - .filter(doc -> minScore == null || doc.score >= minScore) - .sorted(Comparator.comparing((RankFeatureDoc doc) -> doc.score).reversed()) - .toArray(RankFeatureDoc[]::new); - } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java index 50d91a2271de6..2e9be42b5c5d4 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.search.rank.feature.RankFeatureDoc; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.core.inference.action.GetInferenceModelAction; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.argThat; @@ -54,10 +54,9 @@ public void onFailure(Exception e) { fail(); } }); - verify(mockClient).execute( - eq(InferenceAction.INSTANCE), - argThat(actionRequest -> ((InferenceAction.Request) actionRequest).getTaskType().equals(TaskType.RERANK)), + eq(GetInferenceModelAction.INSTANCE), + argThat(actionRequest -> ((GetInferenceModelAction.Request) actionRequest).getTaskType().equals(TaskType.RERANK)), any() ); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java index 7fbfe70dbcfe7..a26dc50097cf5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.rank.textsimilarity; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.client.internal.Client; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.inference.InputType; @@ -29,22 +30,46 @@ import java.util.Objects; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; public class TextSimilarityRankTests extends ESSingleNodeTestCase { /** - * {@code TextSimilarityRankBuilder} that simulates an inference call that returns a different number of results as the input. + * {@code TextSimilarityRankBuilder} that sets top_n in the inference endpoint's task settings. + * See {@code TextSimilarityTestPlugin -> TestFilter -> handleGetInferenceModelActionRequest} for the logic that extracts the top_n + * value. 
*/ - public static class InvalidInferenceResultCountProvidingTextSimilarityRankBuilder extends TextSimilarityRankBuilder { + public static class TopNConfigurationAcceptingTextSimilarityRankBuilder extends TextSimilarityRankBuilder { - public InvalidInferenceResultCountProvidingTextSimilarityRankBuilder( + public TopNConfigurationAcceptingTextSimilarityRankBuilder( String field, String inferenceId, String inferenceText, int rankWindowSize, - Float minScore + Float minScore, + int topN + ) { + super(field, inferenceId + "-task-settings-top-" + topN, inferenceText, rankWindowSize, minScore); + } + } + + /** + * {@code TextSimilarityRankBuilder} that simulates an inference call returning N results. + */ + public static class InferenceResultCountAcceptingTextSimilarityRankBuilder extends TextSimilarityRankBuilder { + + private final int inferenceResultCount; + + public InferenceResultCountAcceptingTextSimilarityRankBuilder( + String field, + String inferenceId, + String inferenceText, + int rankWindowSize, + Float minScore, + int inferenceResultCount ) { super(field, inferenceId, inferenceText, rankWindowSize, minScore); + this.inferenceResultCount = inferenceResultCount; } @Override @@ -62,10 +87,10 @@ public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorCo protected InferenceAction.Request generateRequest(List docFeatures) { return new InferenceAction.Request( TaskType.RERANK, - inferenceId, + this.inferenceId, inferenceText, docFeatures, - Map.of("invalidInferenceResultCount", true), + Map.of("inferenceResultCount", inferenceResultCount), InputType.SEARCH, InferenceAction.Request.DEFAULT_TIMEOUT ); @@ -151,17 +176,38 @@ public void testRerankInferenceFailure() { ); } - public void testRerankInferenceResultMismatch() { - ElasticsearchAssertions.assertFailures( + public void testRerankTopNConfigurationAndRankWindowSizeMismatch() { + SearchPhaseExecutionException ex = expectThrows( + SearchPhaseExecutionException.class, // Execute search with text similarity reranking client.prepareSearch() .setRankBuilder( - new InvalidInferenceResultCountProvidingTextSimilarityRankBuilder("text", "my-rerank-model", "my query", 100, 1.5f) + // Simulate reranker configuration with top_n=3 in task_settings, which is different from rank_window_size=10 + // (Note: top_n comes from inferenceId, there's no other easy way of passing this to the mocked get model request) + new TopNConfigurationAcceptingTextSimilarityRankBuilder("text", "my-rerank-model", "my query", 100, 1.5f, 3) ) - .setQuery(QueryBuilders.matchAllQuery()), - RestStatus.INTERNAL_SERVER_ERROR, - containsString("Failed to execute phase [rank-feature], Computing updated ranks for results failed") + .setQuery(QueryBuilders.matchAllQuery()) + ); + assertThat(ex.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat( + ex.getDetailedMessage(), + containsString("Reduce rank_window_size to be less than or equal to the configured top N value") + ); + } + + public void testRerankInputSizeAndInferenceResultsMismatch() { + SearchPhaseExecutionException ex = expectThrows( + SearchPhaseExecutionException.class, + // Execute search with text similarity reranking + client.prepareSearch() + .setRankBuilder( + // Simulate reranker returning different number of results from input + new InferenceResultCountAcceptingTextSimilarityRankBuilder("text", "my-rerank-model", "my query", 100, 1.5f, 4) + ) + .setQuery(QueryBuilders.matchAllQuery()) ); + assertThat(ex.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); + 
assertThat(ex.getDetailedMessage(), containsString("Reranker input document count and returned score count mismatch")); } private static void assertHitHasRankScoreAndText(SearchHit hit, int expectedRank, float expectedScore, String expectedText) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityTestPlugin.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityTestPlugin.java index 1e457a1a27c92..de81d13f92ea5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityTestPlugin.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityTestPlugin.java @@ -21,7 +21,9 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.inference.EmptyTaskSettings; import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskType; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; @@ -39,8 +41,13 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.inference.action.GetInferenceModelAction; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; +import org.elasticsearch.xpack.inference.services.cohere.CohereService; +import org.elasticsearch.xpack.inference.services.cohere.CohereServiceSettings; +import org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankServiceSettings; +import org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankTaskSettings; import java.io.IOException; import java.util.ArrayList; @@ -48,6 +55,8 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import static java.util.Collections.singletonList; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -100,7 +109,6 @@ public int order() { } @Override - @SuppressWarnings("unchecked") public void apply( Task task, String action, @@ -108,23 +116,59 @@ public void app ActionListener listener, ActionFilterChain chain ) { - // For any other action than inference, execute normally - if (action.equals(InferenceAction.INSTANCE.name()) == false) { + if (action.equals(GetInferenceModelAction.INSTANCE.name())) { + assert request instanceof GetInferenceModelAction.Request; + handleGetInferenceModelActionRequest((GetInferenceModelAction.Request) request, listener); + } else if (action.equals(InferenceAction.INSTANCE.name())) { + assert request instanceof InferenceAction.Request; + handleInferenceActionRequest((InferenceAction.Request) request, listener); + } else { + // For any other action than get model and inference, execute normally chain.proceed(task, action, request, listener); - return; } + } - assert request instanceof InferenceAction.Request; - boolean shouldThrow = (boolean) ((InferenceAction.Request) request).getTaskSettings().getOrDefault("throwing", false); - boolean hasInvalidInferenceResultCount = (boolean) ((InferenceAction.Request) request).getTaskSettings() - 
.getOrDefault("invalidInferenceResultCount", false); + @SuppressWarnings("unchecked") + private void handleGetInferenceModelActionRequest( + GetInferenceModelAction.Request request, + ActionListener listener + ) { + String inferenceEntityId = request.getInferenceEntityId(); + Integer topN = null; + Matcher extractTopN = Pattern.compile(".*(task-settings-top-\\d+).*").matcher(inferenceEntityId); + if (extractTopN.find()) { + topN = Integer.parseInt(extractTopN.group(1).replaceAll("\\D", "")); + } + + ActionResponse response = new GetInferenceModelAction.Response( + List.of( + new ModelConfigurations( + request.getInferenceEntityId(), + request.getTaskType(), + CohereService.NAME, + new CohereRerankServiceSettings(new CohereServiceSettings()), + topN == null ? new EmptyTaskSettings() : new CohereRerankTaskSettings(topN, null, null) + ) + ) + ); + listener.onResponse((Response) response); + } + + @SuppressWarnings("unchecked") + private void handleInferenceActionRequest( + InferenceAction.Request request, + ActionListener listener + ) { + Map taskSettings = request.getTaskSettings(); + boolean shouldThrow = (boolean) taskSettings.getOrDefault("throwing", false); + Integer inferenceResultCount = (Integer) taskSettings.get("inferenceResultCount"); if (shouldThrow) { listener.onFailure(new UnsupportedOperationException("simulated failure")); } else { List rankedDocsResults = new ArrayList<>(); - List inputs = ((InferenceAction.Request) request).getInput(); - int resultCount = hasInvalidInferenceResultCount ? inputs.size() - 1 : inputs.size(); + List inputs = request.getInput(); + int resultCount = inferenceResultCount == null ? inputs.size() : inferenceResultCount; for (int i = 0; i < resultCount; i++) { rankedDocsResults.add(new RankedDocsResults.RankedDoc(i, Float.parseFloat(inputs.get(i)), inputs.get(i))); } From edfbd5d3dee129c5a9ee60dc3fed2aa34564af77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Tue, 30 Jul 2024 15:13:20 +0200 Subject: [PATCH 135/406] [DOCS] Adds elser service to the inference tutorial (#111406) (#111450) * [DOCS] Adds elser service to the inference tutorial. * [DOCS] Amends search intro text. * [DOCS] Addresses feedback. 
--- .../semantic-search-inference.asciidoc | 13 ++-- .../infer-api-ingest-pipeline-widget.asciidoc | 17 ++++++ .../infer-api-ingest-pipeline.asciidoc | 26 ++++++++ .../infer-api-mapping-widget.asciidoc | 17 ++++++ .../inference-api/infer-api-mapping.asciidoc | 28 +++++++++ .../infer-api-reindex-widget.asciidoc | 17 ++++++ .../inference-api/infer-api-reindex.asciidoc | 23 +++++++ .../infer-api-requirements-widget.asciidoc | 17 ++++++ .../infer-api-requirements.asciidoc | 7 +++ .../infer-api-search-widget.asciidoc | 17 ++++++ .../inference-api/infer-api-search.asciidoc | 61 +++++++++++++++++++ .../infer-api-task-widget.asciidoc | 19 +++++- .../inference-api/infer-api-task.asciidoc | 22 ++++++- 13 files changed, 274 insertions(+), 10 deletions(-) diff --git a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc index ae27b46d4b876..f74bc65e31bf0 100644 --- a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc @@ -11,8 +11,7 @@ IMPORTANT: For the easiest way to perform semantic search in the {stack}, refer The following examples use Cohere's `embed-english-v3.0` model, the `all-mpnet-base-v2` model from HuggingFace, and OpenAI's `text-embedding-ada-002` second generation embedding model. You can use any Cohere and OpenAI models, they are all supported by the {infer} API. -For a list of supported models available on HuggingFace, refer to -<>. +For a list of recommended models available on HuggingFace, refer to <>. Azure based examples use models available through https://ai.azure.com/explore/models?selectedTask=embeddings[Azure AI Studio] or https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models[Azure OpenAI]. @@ -40,8 +39,7 @@ include::{es-ref-dir}/tab-widgets/inference-api/infer-api-task-widget.asciidoc[] ==== Create the index mapping The mapping of the destination index - the index that contains the embeddings that the model will create based on your input text - must be created. -The destination index must have a field with the <> -field type to index the output of the used model. +The destination index must have a field with the <> field type for most models and the <> field type for the sparse vector models like in the case of the `elser` service to index the output of the used model. include::{es-ref-dir}/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc[] @@ -49,8 +47,7 @@ include::{es-ref-dir}/tab-widgets/inference-api/infer-api-mapping-widget.asciido [[infer-service-inference-ingest-pipeline]] ==== Create an ingest pipeline with an inference processor -Create an <> with an -<> and use the model you created above to infer against the data that is being ingested in the pipeline. +Create an <> with an <> and use the model you created above to infer against the data that is being ingested in the pipeline. include::{es-ref-dir}/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc[] @@ -103,8 +100,8 @@ POST _tasks//_cancel ==== Semantic search After the data set has been enriched with the embeddings, you can query the data using {ref}/knn-search.html#knn-semantic-search[semantic search]. -Pass a -`query_vector_builder` to the k-nearest neighbor (kNN) vector search API, and provide the query text and the model you have used to create the embeddings. 
+In case of dense vector models, pass a `query_vector_builder` to the k-nearest neighbor (kNN) vector search API, and provide the query text and the model you have used to create the embeddings. +In case of a sparse vector model like ELSER, use a `sparse_vector` query, and provide the query text with the model you have used to create the embeddings. NOTE: If you cancelled the reindexing process, you run the query only a part of the data which affects the quality of your results. diff --git a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc index 6039d1de5345b..997dbbe8a20e6 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc @@ -7,6 +7,12 @@ id="infer-api-ingest-cohere"> Cohere + + + + + - +