diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index 87fda361e5041..22612bfaf344a 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -56,7 +56,7 @@ steps:
     timeout_in_minutes: 300
     matrix:
       setup:
-        BWC_VERSION: ["7.17.27", "8.15.6", "8.16.2", "8.17.0", "8.18.0"]
+        BWC_VERSION: ["7.17.27", "8.16.2", "8.17.1", "8.18.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic-packaging.template.yml b/.buildkite/pipelines/periodic-packaging.template.yml
index 9ba807d8ff741..38e4a71ccea93 100644
--- a/.buildkite/pipelines/periodic-packaging.template.yml
+++ b/.buildkite/pipelines/periodic-packaging.template.yml
@@ -12,17 +12,20 @@ steps:
               - opensuse-leap-15
               - oraclelinux-7
               - oraclelinux-8
+              - oraclelinux-9
               - sles-12
               - sles-15
               - ubuntu-1804
               - ubuntu-2004
               - ubuntu-2204
+              - ubuntu-2404
               - rocky-8
               - rocky-9
               - rhel-7
               - rhel-8
               - rhel-9
               - almalinux-8
+              - almalinux-9
         agents:
           provider: gcp
           image: family/elasticsearch-{{matrix.image}}
diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml
index 40c832e7cda21..a9a8a1a613ec5 100644
--- a/.buildkite/pipelines/periodic-packaging.yml
+++ b/.buildkite/pipelines/periodic-packaging.yml
@@ -13,17 +13,20 @@ steps:
              - opensuse-leap-15
              - oraclelinux-7
              - oraclelinux-8
+              - oraclelinux-9
              - sles-12
              - sles-15
              - ubuntu-1804
              - ubuntu-2004
              - ubuntu-2204
+              - ubuntu-2404
              - rocky-8
              - rocky-9
              - rhel-7
              - rhel-8
              - rhel-9
              - almalinux-8
+              - almalinux-9
        agents:
          provider: gcp
          image: family/elasticsearch-{{matrix.image}}
@@ -560,8 +563,8 @@ steps:
    env:
      BWC_VERSION: 8.14.3
 
-  - label: "{{matrix.image}} / 8.15.6 / packaging-tests-upgrade"
-    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.6
+  - label: "{{matrix.image}} / 8.15.5 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.5
    timeout_in_minutes: 300
    matrix:
      setup:
@@ -574,7 +577,7 @@ steps:
      machineType: custom-16-32768
      buildDirectory: /dev/shm/bk
    env:
-      BWC_VERSION: 8.15.6
+      BWC_VERSION: 8.15.5
 
  - label: "{{matrix.image}} / 8.16.2 / packaging-tests-upgrade"
    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.2
    timeout_in_minutes: 300
    matrix:
      setup:
@@ -592,8 +595,8 @@ steps:
    env:
      BWC_VERSION: 8.16.2
 
-  - label: "{{matrix.image}} / 8.17.0 / packaging-tests-upgrade"
-    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.17.0
+  - label: "{{matrix.image}} / 8.17.1 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.17.1
    timeout_in_minutes: 300
    matrix:
      setup:
@@ -606,7 +609,7 @@ steps:
      machineType: custom-16-32768
      buildDirectory: /dev/shm/bk
    env:
-      BWC_VERSION: 8.17.0
+      BWC_VERSION: 8.17.1
 
  - label: "{{matrix.image}} / 8.18.0 / packaging-tests-upgrade"
    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.18.0
diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml
index 848e84221668e..69e8b4e72f641 100644
--- a/.buildkite/pipelines/periodic-platform-support.yml
+++ b/.buildkite/pipelines/periodic-platform-support.yml
@@ -12,17 +12,20 @@ steps:
              - opensuse-leap-15
              - oraclelinux-7
              - oraclelinux-8
+              - oraclelinux-9
              - sles-12
              - sles-15
              - ubuntu-1804
              - ubuntu-2004
              - ubuntu-2204
+              - ubuntu-2404
              - rocky-8
              - rocky-9
              - rhel-7
              - rhel-8
              - rhel-9
              - almalinux-8
+              - almalinux-9
        agents:
          provider: gcp
          image: family/elasticsearch-{{matrix.image}}
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index fc6cf96bcd42a..74a54101c545b 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -629,8 +629,8 @@ steps:
        - signal_reason: agent_stop
          limit: 3
 
-  - label: 8.15.6 / bwc
-    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.6#bwcTest
+  - label: 8.15.5 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.5#bwcTest
    timeout_in_minutes: 300
    agents:
      provider: gcp
@@ -639,7 +639,7 @@ steps:
      buildDirectory: /dev/shm/bk
    preemptible: true
    env:
-      BWC_VERSION: 8.15.6
+      BWC_VERSION: 8.15.5
    retry:
      automatic:
        - exit_status: "-1"
@@ -667,8 +667,8 @@ steps:
        - signal_reason: agent_stop
          limit: 3
 
-  - label: 8.17.0 / bwc
-    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.17.0#bwcTest
+  - label: 8.17.1 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.17.1#bwcTest
    timeout_in_minutes: 300
    agents:
      provider: gcp
@@ -677,7 +677,7 @@ steps:
      buildDirectory: /dev/shm/bk
    preemptible: true
    env:
-      BWC_VERSION: 8.17.0
+      BWC_VERSION: 8.17.1
    retry:
      automatic:
        - exit_status: "-1"
@@ -771,7 +771,7 @@ steps:
      setup:
        ES_RUNTIME_JAVA:
          - openjdk17
-        BWC_VERSION: ["7.17.27", "8.15.6", "8.16.2", "8.17.0", "8.18.0"]
+        BWC_VERSION: ["7.17.27", "8.16.2", "8.17.1", "8.18.0"]
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
@@ -819,7 +819,7 @@ steps:
          - openjdk21
          - openjdk22
          - openjdk23
-        BWC_VERSION: ["7.17.27", "8.15.6", "8.16.2", "8.17.0", "8.18.0"]
+        BWC_VERSION: ["7.17.27", "8.16.2", "8.17.1", "8.18.0"]
    agents:
      provider: gcp
      image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/pull-request/packaging-tests-unix.yml b/.buildkite/pipelines/pull-request/packaging-tests-unix.yml
index 8bec706bb758d..ddcdf531764d5 100644
--- a/.buildkite/pipelines/pull-request/packaging-tests-unix.yml
+++ b/.buildkite/pipelines/pull-request/packaging-tests-unix.yml
@@ -3,65 +3,9 @@ config:
 steps:
   - group: packaging-tests-unix
     steps:
-      - label: "{{matrix.image}} / docker / packaging-tests-unix"
-        key: "packaging-tests-unix-docker"
-        command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.docker-cloud-ess
-        timeout_in_minutes: 300
-        matrix:
-          setup:
-            image:
-              - debian-11
-              - debian-12
-              - opensuse-leap-15
-              - oraclelinux-7
-              - oraclelinux-8
-              - sles-12
-              - sles-15
-              - ubuntu-1804
-              - ubuntu-2004
-              - ubuntu-2204
-              - rocky-8
-              - rocky-9
-              - rhel-7
-              - rhel-8
-              - rhel-9
-              - almalinux-8
-        agents:
-          provider: gcp
-          image: family/elasticsearch-{{matrix.image}}
-          diskSizeGb: 350
-          machineType: custom-16-32768
-      - label: "{{matrix.image}} / packages / packaging-tests-unix"
-        key: "packaging-tests-unix-packages"
-        command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.packages
-        timeout_in_minutes: 300
-        matrix:
-          setup:
-            image:
-              - debian-11
-              - debian-12
-              - opensuse-leap-15
-              - oraclelinux-7
-              - oraclelinux-8
-              - sles-12
-              - sles-15
-              - ubuntu-1804
-              - ubuntu-2004
-              - ubuntu-2204
-              - rocky-8
-              - rocky-9
-              - rhel-7
-              - rhel-8
-              - rhel-9
-              - almalinux-8
-        agents:
-          provider: gcp
-          image: family/elasticsearch-{{matrix.image}}
-          diskSizeGb: 350
-          machineType: custom-16-32768
-      - label: "{{matrix.image}} / archives / packaging-tests-unix"
-        key: "packaging-tests-unix-archives"
-        command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.archives
+      - label: "{{matrix.image}} / {{matrix.PACKAGING_TASK}} / packaging-tests-unix"
+        key: "packaging-tests-unix"
+        command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.{{matrix.PACKAGING_TASK}}
         timeout_in_minutes: 300
         matrix:
           setup:
@@ -71,17 +15,24 @@ steps:
              - opensuse-leap-15
              - oraclelinux-7
              - oraclelinux-8
+              - oraclelinux-9
              - sles-12
              - sles-15
              - ubuntu-1804
              - ubuntu-2004
              - ubuntu-2204
+              - ubuntu-2404
              - rocky-8
              - rocky-9
              - rhel-7
              - rhel-8
              - rhel-9
              - almalinux-8
+              - almalinux-9
+            PACKAGING_TASK:
+              - docker-cloud-ess
+              - packages
+              - archives
        agents:
          provider: gcp
          image: family/elasticsearch-{{matrix.image}}
diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index aa9ac9776dcc4..1375600f4f6af 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -32,7 +32,7 @@ BWC_VERSION:
   - "8.12.2"
   - "8.13.4"
   - "8.14.3"
-  - "8.15.6"
+  - "8.15.5"
   - "8.16.2"
-  - "8.17.0"
+  - "8.17.1"
   - "8.18.0"
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index 77892cd297f3e..28b7bff191cb0 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,6 +1,5 @@
 BWC_VERSION:
   - "7.17.27"
-  - "8.15.6"
   - "8.16.2"
-  - "8.17.0"
+  - "8.17.1"
   - "8.18.0"
diff --git a/branches.json b/branches.json
index 0e23a795664dd..95fbdb1efd655 100644
--- a/branches.json
+++ b/branches.json
@@ -13,9 +13,6 @@
   {
     "branch": "8.x"
   },
-  {
-    "branch": "8.15"
-  },
   {
     "branch": "7.17"
   }
diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java
index 41c0b4d67e1df..ea9009172c7e2 100644
--- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java
+++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java
@@ -17,8 +17,6 @@
 import org.gradle.api.Project;
 
 import java.io.File;
-import java.util.Arrays;
-import java.util.Map;
 
 /**
  * This plugin configures formatting for Java source using Spotless
@@ -66,8 +64,7 @@ public void apply(Project project) {
                 java.importOrderFile(new File(elasticsearchWorkspace, importOrderPath));
 
                 // Most formatting is done through the Eclipse formatter
-                java.eclipse().withP2Mirrors(Map.of("https://download.eclipse.org/", "https://mirror.umd.edu/eclipse/"))
-                    .configFile(new File(elasticsearchWorkspace, formatterConfigPath));
+                java.eclipse().configFile(new File(elasticsearchWorkspace, formatterConfigPath));
 
                 // Ensure blank lines are actually empty. Since formatters are applied in
                 // order, apply this one last, otherwise non-empty blank lines can creep
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
index 79b0725ec5eaf..79a6293eb49bd 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
@@ -24,7 +24,7 @@ public enum DockerBase {
     // Chainguard based wolfi image with latest jdk
     // This is usually updated via renovatebot
     // spotless:off
-    WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:32f06b169bb4b0f257fbb10e8c8379f06d3ee1355c89b3327cb623781a29590e",
+    WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:1b51ff6dba78c98d3e02b0cd64a8ce3238c7a40408d21e3af12a329d44db6f23",
         "-wolfi",
         "apk"
     ),
diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties
index b13727ffb78da..60978b75c5bbf 100644
--- a/build-tools-internal/version.properties
+++ b/build-tools-internal/version.properties
@@ -1,5 +1,5 @@
 elasticsearch     = 8.18.0
-lucene            = 9.12.0
+lucene            = 9.12.1
 
 bundled_jdk_vendor = openjdk
 bundled_jdk = 23+37@3c5b90190c68498b986a97f276efd28a
diff --git a/build.gradle b/build.gradle
index a5c518afef94e..cf7bcc96330a1 100644
--- a/build.gradle
+++ b/build.gradle
@@ -290,7 +290,10 @@ allprojects {
       if (project.path.contains(":distribution:docker")) {
         enabled = false
       }
-
+      if (project.path.contains(":libs:cli")) {
+        // ensure we resolve p2 dependencies for the spotless eclipse formatter
+        dependsOn "spotlessJavaCheck"
+      }
     }
 
     plugins.withId('lifecycle-base') {
diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc
index b65b974cd6b69..8d31856d4706f 100644
--- a/docs/Versions.asciidoc
+++ b/docs/Versions.asciidoc
@@ -1,8 +1,8 @@
 include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[]
 
-:lucene_version:        9.12.0
-:lucene_version_path:   9_12_0
+:lucene_version:        9.12.1
+:lucene_version_path:   9_12_1
 :jdk:                   11.0.2
 :jdk_major:             11
 :build_type:            tar
diff --git a/docs/changelog/104683.yaml b/docs/changelog/104683.yaml
deleted file mode 100644
index d4f40b59cfd91..0000000000000
--- a/docs/changelog/104683.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104683
-summary: "Feature: re-structure document ID generation favoring _id inverted index compression"
-area: Logs
-type: enhancement
-issues: []
diff --git a/docs/changelog/112881.yaml b/docs/changelog/112881.yaml
deleted file mode 100644
index a8a0d542f8201..0000000000000
--- a/docs/changelog/112881.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 112881
-summary: "ESQL: Remove parent from `FieldAttribute`"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/112989.yaml b/docs/changelog/112989.yaml
deleted file mode 100644
index 364f012f94420..0000000000000
--- a/docs/changelog/112989.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 112989
-summary: Upgrade Bouncy Castle FIPS dependencies
-area: Security
-type: upgrade
-issues: []
diff --git a/docs/changelog/113194.yaml b/docs/changelog/113194.yaml
deleted file mode 100644
index 132659321c65e..0000000000000
--- a/docs/changelog/113194.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 113194
-summary: Add Search Phase APM metrics
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/113713.yaml b/docs/changelog/113713.yaml
deleted file mode 100644
index c5478c95e464d..0000000000000
--- a/docs/changelog/113713.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 113713
-summary: Adding inference endpoint validation for `AzureAiStudioService`
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/113920.yaml b/docs/changelog/113920.yaml
deleted file mode 100644
index 4699ae6d7dd65..0000000000000
--- a/docs/changelog/113920.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 113920
-summary: Add initial support for `semantic_text` field type
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/114334.yaml b/docs/changelog/114334.yaml
deleted file mode 100644
index d0fefe40c6970..0000000000000
--- a/docs/changelog/114334.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 114334
-summary: Don't return TEXT type for functions that take TEXT
-area: ES|QL
-type: bug
-issues:
-  - 111537
-  - 114333
diff --git a/docs/changelog/114482.yaml b/docs/changelog/114482.yaml
deleted file mode 100644
index a5e2e981f7adc..0000000000000
--- a/docs/changelog/114482.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 114482
-summary: Remove snapshot build restriction for match and qstr functions
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/114484.yaml b/docs/changelog/114484.yaml
deleted file mode 100644
index 48f54ad0218bb..0000000000000
--- a/docs/changelog/114484.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 114484
-summary: Add `docvalue_fields` Support for `dense_vector` Fields
-area: Search
-type: enhancement
-issues:
-  - 108470
diff --git a/docs/changelog/114620.yaml b/docs/changelog/114620.yaml
deleted file mode 100644
index 92498db92061f..0000000000000
--- a/docs/changelog/114620.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 114620
-summary: "ES|QL: add metrics for functions"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/114665.yaml b/docs/changelog/114665.yaml
deleted file mode 100644
index b90bb799bd896..0000000000000
--- a/docs/changelog/114665.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 114665
-summary: Fixing remote ENRICH by pushing the Enrich inside `FragmentExec`
-area: ES|QL
-type: bug
-issues:
-  - 105095
diff --git a/docs/changelog/114681.yaml b/docs/changelog/114681.yaml
deleted file mode 100644
index 2a9901114e56f..0000000000000
--- a/docs/changelog/114681.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 114681
-summary: "Support for unsigned 64 bit numbers in Cpu stats"
-area: Infra/Core
-type: enhancement
-issues:
-  - 112274
diff --git a/docs/changelog/114739.yaml b/docs/changelog/114739.yaml
deleted file mode 100644
index 16660e0a07e71..0000000000000
--- a/docs/changelog/114739.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-pr: 114739
-summary: Add a basic deprecation warning that the JSON format for non-detailed error responses is changing in v9
-area: Infra/REST API
-type: deprecation
-issues: [89387]
-deprecation:
-  title: The format of non-detailed error responses is changing in v9
-  area: REST API
-  details: |-
-    When an error occurs when processing a request, Elasticsearch returns information on that error in the REST response.
-    If `http:detailed_errors.enabled: false` is specified in node settings with the v8 REST API and below,
-    the format of this response changes significantly.
-    Starting with the v9 REST API, the JSON structure of responses with errors when the `http.detailed_errors.enabled: false` option is set
-    will be the same as when detailed errors are enabled (which is the default).
-    To keep using the existing format for non-detailed error responses, use the v8 REST API.
-  impact: |-
-    If you have set `http.detailed_errors.enabled: false` (the default is `true`)
-    the structure of JSON when any exceptions occur will change with the v9 REST API.
-    To keep using the existing format, use the v8 REST API.
-notable: false
diff --git a/docs/changelog/114742.yaml b/docs/changelog/114742.yaml
deleted file mode 100644
index 5bd3dad4400b8..0000000000000
--- a/docs/changelog/114742.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 114742
-summary: Adding support for additional mapping to simulate ingest API
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/114819.yaml b/docs/changelog/114819.yaml
deleted file mode 100644
index f8d03f7024801..0000000000000
--- a/docs/changelog/114819.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 114819
-summary: Don't use a `BytesStreamOutput` to copy keys in `BytesRefBlockHash`
-area: EQL
-type: bug
-issues:
-  - 114599
diff --git a/docs/changelog/114855.yaml b/docs/changelog/114855.yaml
deleted file mode 100644
index daa6b985a14cf..0000000000000
--- a/docs/changelog/114855.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 114855
-summary: Add query rules retriever
-area: Relevance
-type: enhancement
-issues: [ ]
diff --git a/docs/changelog/114862.yaml b/docs/changelog/114862.yaml
deleted file mode 100644
index fb5f05fb8e2f9..0000000000000
--- a/docs/changelog/114862.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 114862
-summary: "[Inference API] Add API to get configuration of inference services"
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/114869.yaml b/docs/changelog/114869.yaml
deleted file mode 100644
index 755418e7ce4d9..0000000000000
--- a/docs/changelog/114869.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 114869
-summary: Standardize error code when bulk body is invalid
-area: CRUD
-type: bug
-issues: []
diff --git a/docs/changelog/114899.yaml b/docs/changelog/114899.yaml
deleted file mode 100644
index 399aa5cf35409..0000000000000
--- a/docs/changelog/114899.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 114899
-summary: "ES|QL: Fix stats by constant expression"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/114924.yaml b/docs/changelog/114924.yaml
deleted file mode 100644
index 536f446ef790d..0000000000000
--- a/docs/changelog/114924.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 114924
-summary: Reducing error-level stack trace logging for normal events in `GeoIpDownloader`
-area: Ingest Node
-type: bug
-issues: []
diff --git a/docs/changelog/114934.yaml b/docs/changelog/114934.yaml
deleted file mode 100644
index 68628993b1c80..0000000000000
--- a/docs/changelog/114934.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 114934
-summary: "[ES|QL] To_DatePeriod and To_TimeDuration return better error messages on\
-  \ `union_type` fields"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/114964.yaml b/docs/changelog/114964.yaml
deleted file mode 100644
index 8274aeb76a937..0000000000000
--- a/docs/changelog/114964.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 114964
-summary: Add a `monitor_stats` privilege and allow that privilege for remote cluster
-  privileges
-area: Authorization
-type: enhancement
-issues: []
diff --git a/docs/changelog/115041.yaml b/docs/changelog/115041.yaml
deleted file mode 100644
index f4c047c1569ec..0000000000000
--- a/docs/changelog/115041.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 115041
-summary: Increase default `queue_capacity` to 10_000 and decrease max `queue_capacity`
-  to 100_000
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/115091.yaml b/docs/changelog/115091.yaml
deleted file mode 100644
index 762bcca5e8c52..0000000000000
--- a/docs/changelog/115091.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 115091
-summary: Added stricter range type checks and runtime warnings for ENRICH
-area: ES|QL
-type: bug
-issues:
-  - 107357
-  - 116799
diff --git a/docs/changelog/115102.yaml b/docs/changelog/115102.yaml
deleted file mode 100644
index f679bb6c223a6..0000000000000
--- a/docs/changelog/115102.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 115102
-summary: Watch Next Run Interval Resets On Shard Move or Node Restart
-area: Watcher
-type: bug
-issues:
-  - 111433
diff --git a/docs/changelog/115142.yaml b/docs/changelog/115142.yaml
deleted file mode 100644
index 2af968ae156da..0000000000000
--- a/docs/changelog/115142.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 115142
-summary: Attempt to clean up index before remote transfer
-area: Recovery
-type: enhancement
-issues:
-  - 104473
diff --git a/docs/changelog/115266.yaml b/docs/changelog/115266.yaml
deleted file mode 100644
index 1d7fb1368c0e8..0000000000000
--- a/docs/changelog/115266.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 115266
-summary: ES|QL CCS uses `skip_unavailable` setting for handling disconnected remote
-  clusters
-area: ES|QL
-type: enhancement
-issues: [ 114531 ]
diff --git a/docs/changelog/115359.yaml b/docs/changelog/115359.yaml
deleted file mode 100644
index 65b3086dfc8d0..0000000000000
--- a/docs/changelog/115359.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 115359
-summary: Adding support for simulate ingest mapping adddition for indices with mappings
-  that do not come from templates
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/115414.yaml b/docs/changelog/115414.yaml
deleted file mode 100644
index 7475b765bb30e..0000000000000
--- a/docs/changelog/115414.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-pr: 115414
-summary: Mitigate IOSession timeouts
-area: Machine Learning
-type: bug
-issues:
-  - 114385
-  - 114327
-  - 114105
-  - 114232
diff --git a/docs/changelog/115585.yaml b/docs/changelog/115585.yaml
deleted file mode 100644
index 02eecfc3d7d2b..0000000000000
--- a/docs/changelog/115585.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 115459
-summary: Adds access to flags no_sub_matches and no_overlapping_matches to hyphenation-decompounder-tokenfilter
-area: Search
-type: enhancement
-issues:
-  - 97849
diff --git a/docs/changelog/115640.yaml b/docs/changelog/115640.yaml
deleted file mode 100644
index 5c4a943a9697d..0000000000000
--- a/docs/changelog/115640.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 115640
-summary: Fix NPE on plugin sync
-area: Infra/CLI
-type: bug
-issues:
-  - 114818
diff --git a/docs/changelog/115655.yaml b/docs/changelog/115655.yaml
deleted file mode 100644
index 7184405867657..0000000000000
--- a/docs/changelog/115655.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 115655
-summary: Better sizing `BytesRef` for Strings in Queries
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/115678.yaml b/docs/changelog/115678.yaml
deleted file mode 100644
index 31240eae1ebb4..0000000000000
--- a/docs/changelog/115678.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 115678
-summary: "ESQL: extract common filter from aggs"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/115687.yaml b/docs/changelog/115687.yaml
deleted file mode 100644
index 1180b4627c635..0000000000000
--- a/docs/changelog/115687.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 115687
-summary: Add default ILM policies and switch to ILM for apm-data plugin
-area: Data streams
-type: feature
-issues: []
diff --git a/docs/changelog/115744.yaml b/docs/changelog/115744.yaml
deleted file mode 100644
index 9b8c91e59f451..0000000000000
--- a/docs/changelog/115744.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 115744
-summary: Use `SearchStats` instead of field.isAggregatable in data node planning
-area: ES|QL
-type: bug
-issues:
-  - 115737
diff --git a/docs/changelog/115792.yaml b/docs/changelog/115792.yaml
deleted file mode 100644
index 2945a64e3043a..0000000000000
--- a/docs/changelog/115792.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 115792
-summary: Add ES|QL `bit_length` function
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/115797.yaml b/docs/changelog/115797.yaml
deleted file mode 100644
index 8adf51887c28a..0000000000000
--- a/docs/changelog/115797.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 115797
-summary: Enable `_tier` based coordinator rewrites for all indices (not just mounted
-  indices)
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/115807.yaml b/docs/changelog/115807.yaml
deleted file mode 100644
index d17cabca4bd03..0000000000000
--- a/docs/changelog/115807.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 115807
-summary: "[Inference API] Improve chunked results error message"
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/115812.yaml b/docs/changelog/115812.yaml
deleted file mode 100644
index c45c97041eb00..0000000000000
--- a/docs/changelog/115812.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 115812
-summary: "Prohibit changes to index mode, source, and sort settings during resize"
-area: Logs
-type: bug
-issues: []
diff --git a/docs/changelog/115814.yaml b/docs/changelog/115814.yaml
deleted file mode 100644
index 34f1213272d6f..0000000000000
--- a/docs/changelog/115814.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 115814
-summary: "[ES|QL] Implicit casting string literal to intervals"
-area: ES|QL
-type: enhancement
-issues:
-  - 115352
diff --git a/docs/changelog/115858.yaml b/docs/changelog/115858.yaml
deleted file mode 100644
index 0c0408fa656f8..0000000000000
--- a/docs/changelog/115858.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 115858
-summary: "ESQL: optimise aggregations filtered by false/null into evals"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/115876.yaml b/docs/changelog/115876.yaml
new file mode 100644
index 0000000000000..29b34b8b250fb
--- /dev/null
+++ b/docs/changelog/115876.yaml
@@ -0,0 +1,5 @@
+pr: 115876
+summary: Inference duration and error metrics
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/115994.yaml b/docs/changelog/115994.yaml
deleted file mode 100644
index ac090018c8a12..0000000000000
--- a/docs/changelog/115994.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 115994
-summary: Add logsdb telemetry
-area: Logs
-type: enhancement
-issues: []
diff --git a/docs/changelog/116021.yaml b/docs/changelog/116021.yaml
deleted file mode 100644
index 58c84b26805b2..0000000000000
--- a/docs/changelog/116021.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 116021
-summary: Fields caps does not honour ignore_unavailable
-area: Search
-type: bug
-issues:
-  - 107767
diff --git a/docs/changelog/116082.yaml b/docs/changelog/116082.yaml
deleted file mode 100644
index 35ca5fb1ea82e..0000000000000
--- a/docs/changelog/116082.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116082
-summary: Add support for bitwise inner-product in painless
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/116128.yaml b/docs/changelog/116128.yaml
deleted file mode 100644
index 7c38c0529c50d..0000000000000
--- a/docs/changelog/116128.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116128
-summary: Add num docs and size to logsdb telemetry
-area: Logs
-type: enhancement
-issues: []
diff --git a/docs/changelog/116211.yaml b/docs/changelog/116211.yaml
deleted file mode 100644
index 6f55b1b2fef34..0000000000000
--- a/docs/changelog/116211.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116211
-summary: Use underlying `ByteBuf` `refCount` for `ReleasableBytesReference`
-area: Network
-type: bug
-issues: []
diff --git a/docs/changelog/116325.yaml b/docs/changelog/116325.yaml
deleted file mode 100644
index b8cd16dc85773..0000000000000
--- a/docs/changelog/116325.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116325
-summary: Adjust analyze limit exception to be a `bad_request`
-area: Analysis
-type: bug
-issues: []
diff --git a/docs/changelog/116346.yaml b/docs/changelog/116346.yaml
deleted file mode 100644
index 1dcace88a98c0..0000000000000
--- a/docs/changelog/116346.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116346
-summary: "[ESQL] Fix Binary Comparisons on Date Nanos"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/116348.yaml b/docs/changelog/116348.yaml
deleted file mode 100644
index 927ffc5a6121d..0000000000000
--- a/docs/changelog/116348.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116348
-summary: "ESQL: Honor skip_unavailable setting for nonmatching indices errors at planning time"
-area: ES|QL
-type: enhancement
-issues: [ 114531 ]
diff --git a/docs/changelog/116431.yaml b/docs/changelog/116431.yaml
deleted file mode 100644
index 50c6baf1d01c7..0000000000000
--- a/docs/changelog/116431.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116431
-summary: Adds support for `input_type` field to Vertex inference service
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/116437.yaml b/docs/changelog/116437.yaml
deleted file mode 100644
index 94c2464db9980..0000000000000
--- a/docs/changelog/116437.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116437
-summary: Ensure class resource stream is closed in `ResourceUtils`
-area: Indices APIs
-type: enhancement
-issues: []
diff --git a/docs/changelog/116447.yaml b/docs/changelog/116447.yaml
deleted file mode 100644
index 8c0cea4b54578..0000000000000
--- a/docs/changelog/116447.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116447
-summary: Adding a deprecation info API warning for data streams with old indices
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/116515.yaml b/docs/changelog/116515.yaml
deleted file mode 100644
index 6c0d473361e52..0000000000000
--- a/docs/changelog/116515.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116515
-summary: Esql/lookup join grammar
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/116583.yaml b/docs/changelog/116583.yaml
deleted file mode 100644
index 3dc8337fe5b86..0000000000000
--- a/docs/changelog/116583.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 116583
-summary: Fix NPE in `EnrichLookupService` on mixed clusters with <8.14 versions
-area: ES|QL
-type: bug
-issues:
-  - 116529
-  - 116544
diff --git a/docs/changelog/116591.yaml b/docs/changelog/116591.yaml
deleted file mode 100644
index 60ef241e197b3..0000000000000
--- a/docs/changelog/116591.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116591
-summary: "Add support for `BYTE_LENGTH` scalar function"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/116656.yaml b/docs/changelog/116656.yaml
deleted file mode 100644
index eb5d5a1cfc201..0000000000000
--- a/docs/changelog/116656.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 116656
-summary: _validate does not honour ignore_unavailable
-area: Search
-type: bug
-issues:
-  - 116594
diff --git a/docs/changelog/116664.yaml b/docs/changelog/116664.yaml
deleted file mode 100644
index 36915fca39731..0000000000000
--- a/docs/changelog/116664.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 116664
-summary: Hides `hugging_face_elser` service from the `GET _inference/_services API`
-area: Machine Learning
-type: bug
-issues:
-  - 116644
diff --git a/docs/changelog/116689.yaml b/docs/changelog/116689.yaml
deleted file mode 100644
index 0b1d1646868aa..0000000000000
--- a/docs/changelog/116689.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-pr: 116689
-summary: Deprecate `_source.mode` in mappings
-area: Mapping
-type: deprecation
-issues: []
-deprecation:
-  title: Deprecate `_source.mode` in mappings
-  area: Mapping
-  details: Configuring `_source.mode` in mappings is deprecated and will be removed in future versions. Use `index.mapping.source.mode` index setting instead.
-  impact: Use `index.mapping.source.mode` index setting instead
diff --git a/docs/changelog/116809.yaml b/docs/changelog/116809.yaml
deleted file mode 100644
index 61dbeb233d576..0000000000000
--- a/docs/changelog/116809.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116809
-summary: "Distinguish `LicensedFeature` by family field"
-area: License
-type: bug
-issues: []
diff --git a/docs/changelog/116819.yaml b/docs/changelog/116819.yaml
deleted file mode 100644
index afe06c583fe55..0000000000000
--- a/docs/changelog/116819.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116819
-summary: ESQL - Add match operator (:)
-area: Search
-type: feature
-issues: []
diff --git a/docs/changelog/116931.yaml b/docs/changelog/116931.yaml
deleted file mode 100644
index 8b31d236ff137..0000000000000
--- a/docs/changelog/116931.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116931
-summary: Enable built-in Inference Endpoints and default for Semantic Text
-area: "Machine Learning"
-type: enhancement
-issues: []
diff --git a/docs/changelog/116953.yaml b/docs/changelog/116953.yaml
deleted file mode 100644
index 33616510d8fd0..0000000000000
--- a/docs/changelog/116953.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 116953
-summary: Fix false positive date detection with trailing dot
-area: Mapping
-type: bug
-issues:
-  - 116946
diff --git a/docs/changelog/116957.yaml b/docs/changelog/116957.yaml
deleted file mode 100644
index 1020190de180d..0000000000000
--- a/docs/changelog/116957.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116957
-summary: Propagate scoring function through random sampler
-area: Machine Learning
-type: bug
-issues: [ 110134 ]
diff --git a/docs/changelog/116962.yaml b/docs/changelog/116962.yaml
deleted file mode 100644
index 8f16b00e3f9fc..0000000000000
--- a/docs/changelog/116962.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116962
-summary: "Add special case for elastic reranker in inference API"
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/116964.yaml b/docs/changelog/116964.yaml
new file mode 100644
index 0000000000000..2e3ecd06fa098
--- /dev/null
+++ b/docs/changelog/116964.yaml
@@ -0,0 +1,6 @@
+pr: 116964
+summary: "Support ST_ENVELOPE and related (ST_XMIN, ST_XMAX, ST_YMIN, ST_YMAX) functions"
+area: ES|QL
+type: feature
+issues:
+  - 104875
diff --git a/docs/changelog/116980.yaml b/docs/changelog/116980.yaml
deleted file mode 100644
index 140324fd40b92..0000000000000
--- a/docs/changelog/116980.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 116980
-summary: "ESQL: Fix sorts containing `_source`"
-area: ES|QL
-type: bug
-issues:
-  - 116659
diff --git a/docs/changelog/116996.yaml b/docs/changelog/116996.yaml
new file mode 100644
index 0000000000000..59f59355131bf
--- /dev/null
+++ b/docs/changelog/116996.yaml
@@ -0,0 +1,5 @@
+pr: 116996
+summary: Initial work on `ReindexDatastreamIndexAction`
+area: Data streams
+type: enhancement
+issues: []
diff --git a/docs/changelog/117080.yaml b/docs/changelog/117080.yaml
deleted file mode 100644
index 5909f966e0fa2..0000000000000
--- a/docs/changelog/117080.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117080
-summary: Esql Enable Date Nanos (tech preview)
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/117105.yaml b/docs/changelog/117105.yaml
deleted file mode 100644
index de56c4d521a62..0000000000000
--- a/docs/changelog/117105.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 117105
-summary: Fix long metric deserialize & add - auto-resize needs to be set manually
-area: CCS
-type: bug
-issues:
-  - 116914
diff --git a/docs/changelog/117189.yaml b/docs/changelog/117189.yaml
deleted file mode 100644
index e89c2d81506d9..0000000000000
--- a/docs/changelog/117189.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117189
-summary: Fix deberta tokenizer bug caused by bug in normalizer
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/117213.yaml b/docs/changelog/117213.yaml
deleted file mode 100644
index 3b4cd0cee966c..0000000000000
--- a/docs/changelog/117213.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 117213
-summary: Fix reconstituting version string from components
-area: Ingest Node
-type: bug
-issues:
-  - 116950
diff --git a/docs/changelog/117271.yaml b/docs/changelog/117271.yaml
deleted file mode 100644
index 1a328279b9635..0000000000000
--- a/docs/changelog/117271.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117271
-summary: Don't skip shards in coord rewrite if timestamp is an alias
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/117294.yaml b/docs/changelog/117294.yaml
deleted file mode 100644
index f6e80690de7ff..0000000000000
--- a/docs/changelog/117294.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117294
-summary: Always Emit Inference ID in Semantic Text Mapping
-area: Mapping
-type: bug
-issues: []
diff --git a/docs/changelog/117297.yaml b/docs/changelog/117297.yaml
deleted file mode 100644
index 4a0051bbae644..0000000000000
--- a/docs/changelog/117297.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117297
-summary: Fix CCS exchange when multi cluster aliases point to same cluster
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/117312.yaml b/docs/changelog/117312.yaml
deleted file mode 100644
index 302b91388ef2b..0000000000000
--- a/docs/changelog/117312.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117312
-summary: Add missing `async_search` query parameters to rest-api-spec
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/117316.yaml b/docs/changelog/117316.yaml
deleted file mode 100644
index 69474d68a8190..0000000000000
--- a/docs/changelog/117316.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117316
-summary: Fix validation of SORT by aggregate functions
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/117350.yaml b/docs/changelog/117350.yaml
deleted file mode 100644
index dca54f2037a87..0000000000000
--- a/docs/changelog/117350.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117350
-summary: "Improve halfbyte transposition performance, marginally improving bbq performance"
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/117404.yaml b/docs/changelog/117404.yaml
deleted file mode 100644
index 0bab171956ca9..0000000000000
--- a/docs/changelog/117404.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117404
-summary: Correct bit * byte and bit * float script comparisons
-area: Vector Search
-type: bug
-issues: []
diff --git a/docs/changelog/117469.yaml b/docs/changelog/117469.yaml
new file mode 100644
index 0000000000000..cfb14f78cb578
--- /dev/null
+++ b/docs/changelog/117469.yaml
@@ -0,0 +1,6 @@
+pr: 117469
+summary: Handle exceptions in query phase can match
+area: Search
+type: bug
+issues:
+  - 104994
diff --git a/docs/changelog/117503.yaml b/docs/changelog/117503.yaml
deleted file mode 100644
index d48741262b581..0000000000000
--- a/docs/changelog/117503.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 117503
-summary: Fix COUNT filter pushdown
-area: ES|QL
-type: bug
-issues:
-  - 115522
diff --git a/docs/changelog/117551.yaml b/docs/changelog/117551.yaml
deleted file mode 100644
index 081dd9203d82a..0000000000000
--- a/docs/changelog/117551.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117551
-summary: Fix stats by constant expresson with alias
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/117575.yaml b/docs/changelog/117575.yaml
deleted file mode 100644
index 781444ae97be5..0000000000000
--- a/docs/changelog/117575.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117575
-summary: Fix enrich cache size setting name
-area: Ingest Node
-type: bug
-issues: []
diff --git a/docs/changelog/117589.yaml b/docs/changelog/117589.yaml
new file mode 100644
index 0000000000000..e6880fd9477b5
--- /dev/null
+++ b/docs/changelog/117589.yaml
@@ -0,0 +1,5 @@
+pr: 117589
+summary: "Add Inference Unified API for chat completions for OpenAI"
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/117657.yaml b/docs/changelog/117657.yaml
deleted file mode 100644
index 0a72e9dabe9e8..0000000000000
--- a/docs/changelog/117657.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117657
-summary: Ignore cancellation exceptions
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/117762.yaml b/docs/changelog/117762.yaml
deleted file mode 100644
index 123432e0f0507..0000000000000
--- a/docs/changelog/117762.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 117762
-summary: "Parse the contents of dynamic objects for [subobjects:false]"
-area: Mapping
-type: bug
-issues:
-  - 117544
diff --git a/docs/changelog/117792.yaml b/docs/changelog/117792.yaml
deleted file mode 100644
index 2d7ddda1ace40..0000000000000
--- a/docs/changelog/117792.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 117792
-summary: Address mapping and compute engine runtime field issues
-area: Mapping
-type: bug
-issues:
-  - 117644
diff --git a/docs/changelog/117839.yaml b/docs/changelog/117839.yaml
new file mode 100644
index 0000000000000..98c97b5078c02
--- /dev/null
+++ b/docs/changelog/117839.yaml
@@ -0,0 +1,5 @@
+pr: 117839
+summary: Add match support for `semantic_text` fields
+area: "Search"
+type: enhancement
+issues: []
diff --git a/docs/changelog/117840.yaml b/docs/changelog/117840.yaml
new file mode 100644
index 0000000000000..e1f469643af42
--- /dev/null
+++ b/docs/changelog/117840.yaml
@@ -0,0 +1,5 @@
+pr: 117840
+summary: Fix timeout ingesting an empty string into a `semantic_text` field
+area: Machine Learning
+type: bug
+issues: []
diff --git a/docs/changelog/117842.yaml b/docs/changelog/117842.yaml
deleted file mode 100644
index 9b528a158288c..0000000000000
--- a/docs/changelog/117842.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117842
-summary: Limit size of `Literal#toString`
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/117865.yaml b/docs/changelog/117865.yaml
deleted file mode 100644
index 33dc497725f92..0000000000000
--- a/docs/changelog/117865.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117865
-summary: Fix BWC for ES|QL cluster request
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/117914.yaml b/docs/changelog/117914.yaml
deleted file mode 100644
index da58ed7bb04b7..0000000000000
--- a/docs/changelog/117914.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117914
-summary: Fix for propagating filters from compound to inner retrievers
-area: Ranking
-type: bug
-issues: []
diff --git a/docs/changelog/117920.yaml b/docs/changelog/117920.yaml
deleted file mode 100644
index 1bfddabd4462d..0000000000000
--- a/docs/changelog/117920.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 117920
-summary: Wait for the worker service to shutdown before closing task processor
-area: Machine Learning
-type: bug
-issues:
-  - 117563
diff --git a/docs/changelog/117953.yaml b/docs/changelog/117953.yaml
deleted file mode 100644
index 62f0218b1cdc7..0000000000000
--- a/docs/changelog/117953.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117953
-summary: Acquire stats searcher for data stream stats
-area: Data streams
-type: bug
-issues: []
diff --git a/docs/changelog/118035.yaml b/docs/changelog/118035.yaml
new file mode 100644
index 0000000000000..fdeaa184723b9
--- /dev/null
+++ b/docs/changelog/118035.yaml
@@ -0,0 +1,6 @@
+pr: 118035
+summary: Include hidden indices in `DeprecationInfoAction`
+area: Indices APIs
+type: bug
+issues:
+  - 118020
diff --git a/docs/changelog/118102.yaml b/docs/changelog/118102.yaml
new file mode 100644
index 0000000000000..e5ec32cdddbec
--- /dev/null
+++ b/docs/changelog/118102.yaml
@@ -0,0 +1,5 @@
+pr: 118102
+summary: "ESQL: Enterprise license enforcement for CCS"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/118114.yaml b/docs/changelog/118114.yaml
new file mode 100644
index 0000000000000..1b7532d5df981
--- /dev/null
+++ b/docs/changelog/118114.yaml
@@ -0,0 +1,5 @@
+pr: 118114
+summary: Enable physical plan verification
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/118166.yaml b/docs/changelog/118166.yaml
new file mode 100644
index 0000000000000..99e3fcafd5805
--- /dev/null
+++ b/docs/changelog/118166.yaml
@@ -0,0 +1,5 @@
+pr: 118166
+summary: Update minimum supported snapshot version for Machine Learning jobs to 8.3.0
+area: Machine Learning
+type: upgrade
+issues: []
diff --git a/docs/changelog/118173.yaml b/docs/changelog/118173.yaml
new file mode 100644
index 0000000000000..a3c9054674ba5
--- /dev/null
+++ b/docs/changelog/118173.yaml
@@ -0,0 +1,5 @@
+pr: 118173
+summary: ES|QL categorize with multiple groupings
+area: Machine Learning
+type: feature
+issues: []
diff --git a/docs/changelog/118194.yaml b/docs/changelog/118194.yaml
new file mode 100644
index 0000000000000..0e5eca55d597c
--- /dev/null
+++ b/docs/changelog/118194.yaml
@@ -0,0 +1,5 @@
+pr: 118194
+summary: Retry on `ClusterBlockException` on transform destination index
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/118291.yaml b/docs/changelog/118291.yaml
new file mode 100644
index 0000000000000..8001b3972e876
--- /dev/null
+++ b/docs/changelog/118291.yaml
@@ -0,0 +1,5 @@
+pr: 118291
+summary: Adding a migration reindex cancel API
+area: Data streams
+type: enhancement
+issues: []
diff --git a/docs/changelog/118300.yaml b/docs/changelog/118300.yaml
new file mode 100644
index 0000000000000..e11f5bfd73b2e
--- /dev/null
+++ b/docs/changelog/118300.yaml
@@ -0,0 +1,5 @@
+pr: 118300
+summary: Upgrade to Lucene 9.12.1
+area: Search
+type: upgrade
+issues: []
diff --git a/docs/changelog/118354.yaml b/docs/changelog/118354.yaml
deleted file mode 100644
index e2d72db121276..0000000000000
--- a/docs/changelog/118354.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 118354
-summary: Fix log message format bugs
-area: Ingest Node
-type: bug
-issues: []
diff --git a/docs/changelog/118370.yaml b/docs/changelog/118370.yaml
deleted file mode 100644
index e6a429448e493..0000000000000
--- a/docs/changelog/118370.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 118370
-summary: Fix concurrency issue with `ReinitializingSourceProvider`
-area: Mapping
-type: bug
-issues:
-  - 118238
diff --git a/docs/changelog/118378.yaml b/docs/changelog/118378.yaml
deleted file mode 100644
index d6c388b671968..0000000000000
--- a/docs/changelog/118378.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 118378
-summary: Opt into extra data stream resolution
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/118410.yaml b/docs/changelog/118410.yaml
new file mode 100644
index 0000000000000..ccc7f71ee2e1c
--- /dev/null
+++ b/docs/changelog/118410.yaml
@@ -0,0 +1,5 @@
+pr: 118410
+summary: Push down filter passed lookup join
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/118435.yaml b/docs/changelog/118435.yaml
new file mode 100644
index 0000000000000..8bccbeb54698d
--- /dev/null
+++ b/docs/changelog/118435.yaml
@@ -0,0 +1,6 @@
+pr: 118435
+summary: '`_score` should not be a reserved attribute in ES|QL'
+area: ES|QL
+type: enhancement
+issues:
+  - 118460
diff --git a/docs/changelog/118516.yaml b/docs/changelog/118516.yaml
new file mode 100644
index 0000000000000..8a618a6d6cfd7
--- /dev/null
+++ b/docs/changelog/118516.yaml
@@ -0,0 +1,6 @@
+pr: 118435
+summary: Fix moving function linear weighted avg
+area: Aggregations
+type: bug
+issues:
+  - 113751
diff --git a/docs/changelog/118704.yaml b/docs/changelog/118704.yaml
new file mode 100644
index 0000000000000..c89735664f25e
--- /dev/null
+++ b/docs/changelog/118704.yaml
@@ -0,0 +1,6 @@
+pr: 118704
+summary: Avoid updating settings version in `MetadataMigrateToDataStreamService` when
+  settings have not changed
+area: Data streams
+type: bug
+issues: []
diff --git a/docs/reference/connector/docs/connectors-release-notes.asciidoc b/docs/reference/connector/docs/connectors-release-notes.asciidoc
index e1ed082365c00..ff3d859e1a888 100644
--- a/docs/reference/connector/docs/connectors-release-notes.asciidoc
+++ b/docs/reference/connector/docs/connectors-release-notes.asciidoc
@@ -9,8 +9,76 @@ Prior to version *8.16.0*, the connector release notes were published as part
 of the {enterprise-search-ref}/changelog.html[Enterprise Search documentation].
 ====
 
-*Release notes*:
+[discrete]
+[[es-connectors-release-notes-8-17-0]]
+=== 8.17.0
 
-* <<es-connectors-release-notes-8-16-0>>
+No notable changes in this release.
 
-include::release-notes/connectors-release-notes-8.16.0.asciidoc[]
+[discrete]
+[[es-connectors-release-notes-8-16-1]]
+=== 8.16.1
+
+[discrete]
+[[es-connectors-release-notes-8-16-1-bug-fixes]]
+==== Bug fixes
+
+* Fixed a bug in the Outlook Connector where having deactivated users could cause the sync to fail.
+See https://github.com/elastic/connectors/pull/2967[*PR 2967*].
+* Fixed a bug where the Confluence connector was not downloading some blog post documents due to unexpected response format.
+See https://github.com/elastic/connectors/pull/2984[*PR 2984*].
+
+[discrete]
+[[es-connectors-release-notes-8-16-0]]
+=== 8.16.0
+
+[discrete]
+[[es-connectors-release-notes-deprecation-notice]]
+==== Deprecation notices
+
+* *Direct index access for connectors and sync jobs*
++
+IMPORTANT: Directly accessing connector and sync job state through `.elastic-connectors*` indices is deprecated, and will be disallowed entirely in a future release.
+
+* Instead, the Elasticsearch Connector APIs should be used. Connectors framework code now uses the <> by default.
+See https://github.com/elastic/connectors/pull/2884[*PR 2902*].
+
+* *Docker `enterprise-search` namespace deprecation*
++
+IMPORTANT: The `enterprise-search` Docker namespace is deprecated and will be discontinued in a future release.
++
+Starting in `8.16.0`, Docker images are being transitioned to the new `integrations` namespace, which will become the sole location for future releases. This affects the https://github.com/elastic/connectors[Elastic Connectors] and https://github.com/elastic/data-extraction-service[Elastic Data Extraction Service].
++
+During this transition period, images are published to both namespaces:
++
+** *Example*:
++
+Deprecated namespace::
+`docker.elastic.co/enterprise-search/elastic-connectors:v8.16.0`
++
+New namespace::
+`docker.elastic.co/integrations/elastic-connectors:v8.16.0`
++
+Users should migrate to the new `integrations` namespace as soon as possible to ensure continued access to future releases.
+
+[discrete]
+[[es-connectors-release-notes-8-16-0-enhancements]]
+==== Enhancements
+
+* Docker images now use Chainguard's Wolfi base image (`docker.elastic.co/wolfi/jdk:openjdk-11-dev`), replacing the previous `ubuntu:focal` base.
+
+* The Sharepoint Online connector now works with the `Sites.Selected` permission instead of the broader permission `Sites.Read.All`.
+See https://github.com/elastic/connectors/pull/2762[*PR 2762*].
+
+* Starting in 8.16.0, connectors will start using proper SEMVER, with `MAJOR.MINOR.PATCH`, which aligns with Elasticsearch/Kibana versions. This drops the previous `.BUILD` suffix, which we used to release connectors between Elastic stack releases. Going forward, these inter-stack-release releases will be suffixed instead with `+`, aligning with Elastic Agent and conforming to SEMVER.
+See https://github.com/elastic/connectors/pull/2749[*PR 2749*].
+
+* Connector logs now use UTC timestamps, instead of machine-local timestamps. This only impacts logging output.
+See https://github.com/elastic/connectors/pull/2695[*PR 2695*].
+
+[discrete]
+[[es-connectors-release-notes-8-16-0-bug-fixes]]
+==== Bug fixes
+
+* The Dropbox connector now fetches the files from team shared folders.
+See https://github.com/elastic/connectors/pull/2718[*PR 2718*].
diff --git a/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc b/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc
deleted file mode 100644
index 7608336073176..0000000000000
--- a/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc
+++ /dev/null
@@ -1,53 +0,0 @@
-[[es-connectors-release-notes-8-16-0]]
-=== 8.16.0 connectors release notes
-
-[discrete]
-[[es-connectors-release-notes-deprecation-notice]]
-==== Deprecation notices
-
-* *Direct index access for connectors and sync jobs*
-+
-IMPORTANT: Directly accessing connector and sync job state through `.elastic-connectors*` indices is deprecated, and will be disallowed entirely in a future release.
-
-* Instead, the Elasticsearch Connector APIs should be used. Connectors framework code now uses the <> by default.
-See https://github.com/elastic/connectors/pull/2884[*PR 2902*].
-
-* *Docker `enterprise-search` namespace deprecation*
-+
-IMPORTANT: The `enterprise-search` Docker namespace is deprecated and will be discontinued in a future release.
-+
-Starting in `8.16.0`, Docker images are being transitioned to the new `integrations` namespace, which will become the sole location for future releases. This affects the https://github.com/elastic/connectors[Elastic Connectors] and https://github.com/elastic/data-extraction-service[Elastic Data Extraction Service].
-+
-During this transition period, images are published to both namespaces:
-+
-** *Example*:
-+
-Deprecated namespace::
-`docker.elastic.co/enterprise-search/elastic-connectors:v8.16.0`
-+
-New namespace::
-`docker.elastic.co/integrations/elastic-connectors:v8.16.0`
-+
-Users should migrate to the new `integrations` namespace as soon as possible to ensure continued access to future releases.
-
-[discrete]
-[[es-connectors-release-notes-8-16-0-enhancements]]
-==== Enhancements
-
-* Docker images now use Chainguard's Wolfi base image (`docker.elastic.co/wolfi/jdk:openjdk-11-dev`), replacing the previous `ubuntu:focal` base.
-
-* The Sharepoint Online connector now works with the `Sites.Selected` permission instead of the broader permission `Sites.Read.All`.
-See https://github.com/elastic/connectors/pull/2762[*PR 2762*].
-
-* Starting in 8.16.0, connectors will start using proper SEMVER, with `MAJOR.MINOR.PATCH`, which aligns with Elasticsearch/Kibana versions. This drops the previous `.BUILD` suffix, which we used to release connectors between Elastic stack releases. Going forward, these inter-stack-release releases will be suffixed instead with `+`, aligning with Elastic Agent and conforming to SEMVER.
-See https://github.com/elastic/connectors/pull/2749[*PR 2749*].
-
-* Connector logs now use UTC timestamps, instead of machine-local timestamps. This only impacts logging output.
-See https://github.com/elastic/connectors/pull/2695[*PR 2695*].
-
-[discrete]
-[[es-connectors-release-notes-8-16-0-bug-fixes]]
-==== Bug fixes
-
-* The Dropbox connector now fetches the files from team shared folders.
-See https://github.com/elastic/connectors/pull/2718[*PR 2718*].
\ No newline at end of file
diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc
index ca6a7e489449b..a212c4e152b0e 100644
--- a/docs/reference/docs/update.asciidoc
+++ b/docs/reference/docs/update.asciidoc
@@ -71,7 +71,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=refresh]
 include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=routing]
 
 `_source`::
-(Optional, list) Set to `false` to disable source retrieval (default: `true`).
+(Optional, list) Set to `true` to enable source retrieval (default: `false`).
 You can also specify a comma-separated list of the fields you want to retrieve.
 
 `_source_excludes`::
diff --git a/docs/reference/esql/functions/description/st_envelope.asciidoc b/docs/reference/esql/functions/description/st_envelope.asciidoc
new file mode 100644
index 0000000000000..6b7cf8d97538a
--- /dev/null
+++ b/docs/reference/esql/functions/description/st_envelope.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Determines the minimum bounding box of the supplied geometry.
diff --git a/docs/reference/esql/functions/description/st_xmax.asciidoc b/docs/reference/esql/functions/description/st_xmax.asciidoc
new file mode 100644
index 0000000000000..f33ec590bf2d4
--- /dev/null
+++ b/docs/reference/esql/functions/description/st_xmax.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Extracts the maximum value of the `x` coordinates from the supplied geometry. If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `longitude` value.
diff --git a/docs/reference/esql/functions/description/st_xmin.asciidoc b/docs/reference/esql/functions/description/st_xmin.asciidoc
new file mode 100644
index 0000000000000..b06cbfacde7bf
--- /dev/null
+++ b/docs/reference/esql/functions/description/st_xmin.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Extracts the minimum value of the `x` coordinates from the supplied geometry. If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `longitude` value.
diff --git a/docs/reference/esql/functions/description/st_ymax.asciidoc b/docs/reference/esql/functions/description/st_ymax.asciidoc
new file mode 100644
index 0000000000000..f9475dd967562
--- /dev/null
+++ b/docs/reference/esql/functions/description/st_ymax.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Extracts the maximum value of the `y` coordinates from the supplied geometry. If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `latitude` value.
diff --git a/docs/reference/esql/functions/description/st_ymin.asciidoc b/docs/reference/esql/functions/description/st_ymin.asciidoc
new file mode 100644
index 0000000000000..7228c63a16030
--- /dev/null
+++ b/docs/reference/esql/functions/description/st_ymin.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Extracts the minimum value of the `y` coordinates from the supplied geometry. If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `latitude` value.
diff --git a/docs/reference/esql/functions/examples/st_envelope.asciidoc b/docs/reference/esql/functions/examples/st_envelope.asciidoc
new file mode 100644
index 0000000000000..df8c0ad5607fa
--- /dev/null
+++ b/docs/reference/esql/functions/examples/st_envelope.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/spatial_shapes.csv-spec[tag=st_envelope]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/spatial_shapes.csv-spec[tag=st_envelope-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/st_xmax.asciidoc b/docs/reference/esql/functions/examples/st_xmax.asciidoc
new file mode 100644
index 0000000000000..5bba1761cf29c
--- /dev/null
+++ b/docs/reference/esql/functions/examples/st_xmax.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/st_xmin.asciidoc b/docs/reference/esql/functions/examples/st_xmin.asciidoc
new file mode 100644
index 0000000000000..5bba1761cf29c
--- /dev/null
+++ b/docs/reference/esql/functions/examples/st_xmin.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/st_ymax.asciidoc b/docs/reference/esql/functions/examples/st_ymax.asciidoc
new file mode 100644
index 0000000000000..5bba1761cf29c
--- /dev/null
+++ b/docs/reference/esql/functions/examples/st_ymax.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/st_ymin.asciidoc b/docs/reference/esql/functions/examples/st_ymin.asciidoc
new file mode 100644
index 0000000000000..5bba1761cf29c
--- /dev/null
+++ b/docs/reference/esql/functions/examples/st_ymin.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max-result]
+|===
+
diff --git a/docs/reference/esql/functions/kibana/definition/st_envelope.json b/docs/reference/esql/functions/kibana/definition/st_envelope.json
new file mode 100644
index 0000000000000..6c00dda265ac7
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/st_envelope.json
@@ -0,0 +1,61 @@
+{
+  "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+  "type" : "eval",
+  "name" : "st_envelope",
+  "description" : "Determines the minimum bounding box of the supplied geometry.",
+  "signatures" : [
+    {
+      "params" : [
+        {
+          "name" : "geometry",
+          "type" : "cartesian_point",
+          "optional" : false,
+          "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "cartesian_shape"
+    },
+    {
+      "params" : [
+        {
+          "name" : "geometry",
+          "type" : "cartesian_shape",
+          "optional" : false,
+          "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "cartesian_shape"
+    },
+    {
+      "params" : [
+        {
+          "name" : "geometry",
+          "type" : "geo_point",
+          "optional" : false,
+          "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "geo_shape"
+    },
+    {
+      "params" : [
+        {
+          "name" : "geometry",
+          "type" : "geo_shape",
+          "optional" : false,
+          "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "geo_shape"
+    }
+  ],
+  "examples" : [
+    "FROM airport_city_boundaries\n| WHERE abbrev == \"CPH\"\n| EVAL envelope = ST_ENVELOPE(city_boundary)\n| KEEP abbrev, airport, envelope"
+  ],
+  "preview" : false,
+  "snapshot_only" : false
+}
diff --git a/docs/reference/esql/functions/kibana/definition/st_xmax.json b/docs/reference/esql/functions/kibana/definition/st_xmax.json
new file mode 100644
index 0000000000000..7be22617c0992
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/st_xmax.json
@@ -0,0 +1,61 @@
+{
+  "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+  "type" : "eval",
+  "name" : "st_xmax",
+  "description" : "Extracts the maximum value of the `x` coordinates from the supplied geometry.\nIf the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `longitude` value.",
+  "signatures" : [
+    {
+      "params" : [
+        {
+          "name" : "point",
+          "type" : "cartesian_point",
+          "optional" : false,
+          "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "double"
+    },
+    {
+      "params" : [
+        {
+          "name" : "point",
+          "type" : "cartesian_shape",
+          "optional" : false,
+          "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`."
+ } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM airport_city_boundaries\n| WHERE abbrev == \"CPH\"\n| EVAL envelope = ST_ENVELOPE(city_boundary)\n| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope)\n| KEEP abbrev, airport, xmin, xmax, ymin, ymax" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/st_xmin.json b/docs/reference/esql/functions/kibana/definition/st_xmin.json new file mode 100644 index 0000000000000..8052fdb861cea --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/st_xmin.json @@ -0,0 +1,61 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "st_xmin", + "description" : "Extracts the minimum value of the `x` coordinates from the supplied geometry.\nIf the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `longitude` value.", + "signatures" : [ + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." 
+ } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM airport_city_boundaries\n| WHERE abbrev == \"CPH\"\n| EVAL envelope = ST_ENVELOPE(city_boundary)\n| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope)\n| KEEP abbrev, airport, xmin, xmax, ymin, ymax" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/st_ymax.json b/docs/reference/esql/functions/kibana/definition/st_ymax.json new file mode 100644 index 0000000000000..1a53f7388ea56 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/st_ymax.json @@ -0,0 +1,61 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "st_ymax", + "description" : "Extracts the maximum value of the `y` coordinates from the supplied geometry.\nIf the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `latitude` value.", + "signatures" : [ + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM airport_city_boundaries\n| WHERE abbrev == \"CPH\"\n| EVAL envelope = ST_ENVELOPE(city_boundary)\n| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope)\n| KEEP abbrev, airport, xmin, xmax, ymin, ymax" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/st_ymin.json b/docs/reference/esql/functions/kibana/definition/st_ymin.json new file mode 100644 index 0000000000000..e11722a8f9c07 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/st_ymin.json @@ -0,0 +1,61 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "st_ymin", + "description" : "Extracts the minimum value of the `y` coordinates from the supplied geometry.\nIf the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `latitude` value.", + "signatures" : [ + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. 
If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM airport_city_boundaries\n| WHERE abbrev == \"CPH\"\n| EVAL envelope = ST_ENVELOPE(city_boundary)\n| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope)\n| KEEP abbrev, airport, xmin, xmax, ymin, ymax" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/docs/st_envelope.md b/docs/reference/esql/functions/kibana/docs/st_envelope.md new file mode 100644 index 0000000000000..5f4c3e4809a82 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_envelope.md @@ -0,0 +1,13 @@ + + +### ST_ENVELOPE +Determines the minimum bounding box of the supplied geometry. + +``` +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| KEEP abbrev, airport, envelope +``` diff --git a/docs/reference/esql/functions/kibana/docs/st_xmax.md b/docs/reference/esql/functions/kibana/docs/st_xmax.md new file mode 100644 index 0000000000000..bbde89df76fd0 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_xmax.md @@ -0,0 +1,15 @@ + + +### ST_XMAX +Extracts the maximum value of the `x` coordinates from the supplied geometry. +If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `longitude` value. + +``` +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope) +| KEEP abbrev, airport, xmin, xmax, ymin, ymax +``` diff --git a/docs/reference/esql/functions/kibana/docs/st_xmin.md b/docs/reference/esql/functions/kibana/docs/st_xmin.md new file mode 100644 index 0000000000000..1a6cee7dcfd62 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_xmin.md @@ -0,0 +1,15 @@ + + +### ST_XMIN +Extracts the minimum value of the `x` coordinates from the supplied geometry. +If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `longitude` value. 
+ +``` +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope) +| KEEP abbrev, airport, xmin, xmax, ymin, ymax +``` diff --git a/docs/reference/esql/functions/kibana/docs/st_ymax.md b/docs/reference/esql/functions/kibana/docs/st_ymax.md new file mode 100644 index 0000000000000..61c9b6c288ca5 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_ymax.md @@ -0,0 +1,15 @@ + + +### ST_YMAX +Extracts the maximum value of the `y` coordinates from the supplied geometry. +If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `latitude` value. + +``` +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope) +| KEEP abbrev, airport, xmin, xmax, ymin, ymax +``` diff --git a/docs/reference/esql/functions/kibana/docs/st_ymin.md b/docs/reference/esql/functions/kibana/docs/st_ymin.md new file mode 100644 index 0000000000000..f5817f10f20a5 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_ymin.md @@ -0,0 +1,15 @@ + + +### ST_YMIN +Extracts the minimum value of the `y` coordinates from the supplied geometry. +If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `latitude` value. + +``` +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope) +| KEEP abbrev, airport, xmin, xmax, ymin, ymax +``` diff --git a/docs/reference/esql/functions/layout/st_envelope.asciidoc b/docs/reference/esql/functions/layout/st_envelope.asciidoc new file mode 100644 index 0000000000000..a20d4275e0c9f --- /dev/null +++ b/docs/reference/esql/functions/layout/st_envelope.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_envelope]] +=== `ST_ENVELOPE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_envelope.svg[Embedded,opts=inline] + +include::../parameters/st_envelope.asciidoc[] +include::../description/st_envelope.asciidoc[] +include::../types/st_envelope.asciidoc[] +include::../examples/st_envelope.asciidoc[] diff --git a/docs/reference/esql/functions/layout/st_xmax.asciidoc b/docs/reference/esql/functions/layout/st_xmax.asciidoc new file mode 100644 index 0000000000000..b0c5e7695521e --- /dev/null +++ b/docs/reference/esql/functions/layout/st_xmax.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_xmax]] +=== `ST_XMAX` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_xmax.svg[Embedded,opts=inline] + +include::../parameters/st_xmax.asciidoc[] +include::../description/st_xmax.asciidoc[] +include::../types/st_xmax.asciidoc[] +include::../examples/st_xmax.asciidoc[] diff --git a/docs/reference/esql/functions/layout/st_xmin.asciidoc b/docs/reference/esql/functions/layout/st_xmin.asciidoc new file mode 100644 index 0000000000000..55fbad88c4cf0 --- /dev/null +++ b/docs/reference/esql/functions/layout/st_xmin.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. 
Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_xmin]] +=== `ST_XMIN` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_xmin.svg[Embedded,opts=inline] + +include::../parameters/st_xmin.asciidoc[] +include::../description/st_xmin.asciidoc[] +include::../types/st_xmin.asciidoc[] +include::../examples/st_xmin.asciidoc[] diff --git a/docs/reference/esql/functions/layout/st_ymax.asciidoc b/docs/reference/esql/functions/layout/st_ymax.asciidoc new file mode 100644 index 0000000000000..e1022de4ba664 --- /dev/null +++ b/docs/reference/esql/functions/layout/st_ymax.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_ymax]] +=== `ST_YMAX` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_ymax.svg[Embedded,opts=inline] + +include::../parameters/st_ymax.asciidoc[] +include::../description/st_ymax.asciidoc[] +include::../types/st_ymax.asciidoc[] +include::../examples/st_ymax.asciidoc[] diff --git a/docs/reference/esql/functions/layout/st_ymin.asciidoc b/docs/reference/esql/functions/layout/st_ymin.asciidoc new file mode 100644 index 0000000000000..65511e1925e27 --- /dev/null +++ b/docs/reference/esql/functions/layout/st_ymin.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_ymin]] +=== `ST_YMIN` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_ymin.svg[Embedded,opts=inline] + +include::../parameters/st_ymin.asciidoc[] +include::../description/st_ymin.asciidoc[] +include::../types/st_ymin.asciidoc[] +include::../examples/st_ymin.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/st_envelope.asciidoc b/docs/reference/esql/functions/parameters/st_envelope.asciidoc new file mode 100644 index 0000000000000..a31c6a85de367 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_envelope.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`geometry`:: +Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/parameters/st_xmax.asciidoc b/docs/reference/esql/functions/parameters/st_xmax.asciidoc new file mode 100644 index 0000000000000..788f3485af297 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_xmax.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`point`:: +Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/parameters/st_xmin.asciidoc b/docs/reference/esql/functions/parameters/st_xmin.asciidoc new file mode 100644 index 0000000000000..788f3485af297 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_xmin.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`point`:: +Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. 
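Taken together, the new functions compose: `ST_ENVELOPE` computes the bounding box and the min/max functions read off its corners. A minimal sketch, reusing the `airport_city_boundaries` index and `city_boundary` field from the generated examples above (the derived `width` and `height` columns are illustrative and, for `geo_point`/`geo_shape` input, are expressed in degrees):

[source,esql]
----
FROM airport_city_boundaries
| WHERE abbrev == "CPH"
| EVAL envelope = ST_ENVELOPE(city_boundary)
| EVAL width = ST_XMAX(envelope) - ST_XMIN(envelope),
       height = ST_YMAX(envelope) - ST_YMIN(envelope)
| KEEP abbrev, airport, width, height
----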
diff --git a/docs/reference/esql/functions/parameters/st_ymax.asciidoc b/docs/reference/esql/functions/parameters/st_ymax.asciidoc new file mode 100644 index 0000000000000..788f3485af297 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_ymax.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`point`:: +Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/parameters/st_ymin.asciidoc b/docs/reference/esql/functions/parameters/st_ymin.asciidoc new file mode 100644 index 0000000000000..788f3485af297 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_ymin.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`point`:: +Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/signature/st_envelope.svg b/docs/reference/esql/functions/signature/st_envelope.svg new file mode 100644 index 0000000000000..885a60e6fd86f --- /dev/null +++ b/docs/reference/esql/functions/signature/st_envelope.svg @@ -0,0 +1 @@ +ST_ENVELOPE(geometry) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_xmax.svg b/docs/reference/esql/functions/signature/st_xmax.svg new file mode 100644 index 0000000000000..348d5a7f72763 --- /dev/null +++ b/docs/reference/esql/functions/signature/st_xmax.svg @@ -0,0 +1 @@ +ST_XMAX(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_xmin.svg b/docs/reference/esql/functions/signature/st_xmin.svg new file mode 100644 index 0000000000000..13d479b0458be --- /dev/null +++ b/docs/reference/esql/functions/signature/st_xmin.svg @@ -0,0 +1 @@ +ST_XMIN(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_ymax.svg b/docs/reference/esql/functions/signature/st_ymax.svg new file mode 100644 index 0000000000000..e6ecb00185c84 --- /dev/null +++ b/docs/reference/esql/functions/signature/st_ymax.svg @@ -0,0 +1 @@ +ST_YMAX(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_ymin.svg b/docs/reference/esql/functions/signature/st_ymin.svg new file mode 100644 index 0000000000000..ae722f1edc3d4 --- /dev/null +++ b/docs/reference/esql/functions/signature/st_ymin.svg @@ -0,0 +1 @@ +ST_YMIN(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/spatial-functions.asciidoc b/docs/reference/esql/functions/spatial-functions.asciidoc index eee44d337b4c6..c6a8467b39996 100644 --- a/docs/reference/esql/functions/spatial-functions.asciidoc +++ b/docs/reference/esql/functions/spatial-functions.asciidoc @@ -15,6 +15,11 @@ * <<esql-st_within>> * <<esql-st_x>> * <<esql-st_y>> +* experimental:[] <<esql-st_envelope>> +* experimental:[] <<esql-st_xmax>> +* experimental:[] <<esql-st_xmin>> +* experimental:[] <<esql-st_ymax>> +* experimental:[] <<esql-st_ymin>> // end::spatial_list[] include::layout/st_distance.asciidoc[] @@ -24,3 +29,8 @@ include::layout/st_contains.asciidoc[] include::layout/st_within.asciidoc[] include::layout/st_x.asciidoc[] include::layout/st_y.asciidoc[] +include::layout/st_envelope.asciidoc[] +include::layout/st_xmax.asciidoc[] +include::layout/st_xmin.asciidoc[] +include::layout/st_ymax.asciidoc[] +include::layout/st_ymin.asciidoc[] diff --git
a/docs/reference/esql/functions/types/st_envelope.asciidoc b/docs/reference/esql/functions/types/st_envelope.asciidoc new file mode 100644 index 0000000000000..43355394c6015 --- /dev/null +++ b/docs/reference/esql/functions/types/st_envelope.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +geometry | result +cartesian_point | cartesian_shape +cartesian_shape | cartesian_shape +geo_point | geo_shape +geo_shape | geo_shape +|=== diff --git a/docs/reference/esql/functions/types/st_xmax.asciidoc b/docs/reference/esql/functions/types/st_xmax.asciidoc new file mode 100644 index 0000000000000..418c5cafae6f3 --- /dev/null +++ b/docs/reference/esql/functions/types/st_xmax.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +cartesian_shape | double +geo_point | double +geo_shape | double +|=== diff --git a/docs/reference/esql/functions/types/st_xmin.asciidoc b/docs/reference/esql/functions/types/st_xmin.asciidoc new file mode 100644 index 0000000000000..418c5cafae6f3 --- /dev/null +++ b/docs/reference/esql/functions/types/st_xmin.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +cartesian_shape | double +geo_point | double +geo_shape | double +|=== diff --git a/docs/reference/esql/functions/types/st_ymax.asciidoc b/docs/reference/esql/functions/types/st_ymax.asciidoc new file mode 100644 index 0000000000000..418c5cafae6f3 --- /dev/null +++ b/docs/reference/esql/functions/types/st_ymax.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +cartesian_shape | double +geo_point | double +geo_shape | double +|=== diff --git a/docs/reference/esql/functions/types/st_ymin.asciidoc b/docs/reference/esql/functions/types/st_ymin.asciidoc new file mode 100644 index 0000000000000..418c5cafae6f3 --- /dev/null +++ b/docs/reference/esql/functions/types/st_ymin.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +cartesian_shape | double +geo_point | double +geo_shape | double +|=== diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index d9b8f8802a04b..73e2db6e45e34 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -113,7 +113,7 @@ Index mode supports the following values: `standard`::: Standard indexing with default settings. -`tsds`::: _(data streams only)_ Index mode optimized for storage of metrics. For more information, see <<tsds>>. +`time_series`::: _(data streams only)_ Index mode optimized for storage of metrics. For more information, see <<tsds>>. `logsdb`::: _(data streams only)_ Index mode optimized for <<logs-data-stream,logs data streams>>.
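With `tsds` renamed to `time_series` here, a minimal sketch of how this mode is typically enabled, via an index template backing a data stream (the template name, index pattern, and the `host`/`cpu_usage` fields are illustrative, not taken from this change):

[source,console]
----
PUT _index_template/my-metrics-template
{
  "index_patterns": ["metrics-example-*"],
  "data_stream": {},
  "template": {
    "settings": {
      "index.mode": "time_series",
      "index.routing_path": ["host"]
    },
    "mappings": {
      "properties": {
        "host": { "type": "keyword", "time_series_dimension": true },
        "cpu_usage": { "type": "double", "time_series_metric": "gauge" }
      }
    }
  }
}
----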
diff --git a/docs/reference/mapping/params/index-prefixes.asciidoc b/docs/reference/mapping/params/index-prefixes.asciidoc index a143c5531c81b..1d5e844467b6f 100644 --- a/docs/reference/mapping/params/index-prefixes.asciidoc +++ b/docs/reference/mapping/params/index-prefixes.asciidoc @@ -54,3 +54,30 @@ PUT my-index-000001 } } -------------------------------- + +The `index_prefixes` parameter instructs {ES} to create a `._index_prefix` subfield, which +is used to run fast prefix queries. When highlighting, add the `._index_prefix` +subfield to the `matched_fields` parameter to highlight the main field based on +matches against the prefix field, as in the request below: + +[source,console] +-------------------------------- +GET my-index-000001/_search +{ + "query": { + "prefix": { + "full_name": { + "value": "ki" + } + } + }, + "highlight": { + "fields": { + "full_name": { + "matched_fields": ["full_name._index_prefix"] + } + } + } +} +-------------------------------- +// TEST[continued] diff --git a/docs/reference/mapping/types/search-as-you-type.asciidoc b/docs/reference/mapping/types/search-as-you-type.asciidoc index 3c71389f4cebb..c2673a614c265 100644 --- a/docs/reference/mapping/types/search-as-you-type.asciidoc +++ b/docs/reference/mapping/types/search-as-you-type.asciidoc @@ -97,11 +97,21 @@ GET my-index-000001/_search "my_field._3gram" ] } + }, + "highlight": { + "fields": { + "my_field": { + "matched_fields": ["my_field._index_prefix"] <1> + } + } } } -------------------------------------------------- // TEST[continued] +<1> Adding `my_field._index_prefix` to `matched_fields` allows `my_field` to be + highlighted based on matches from the `my_field._index_prefix` field as well. + [source,console-result] -------------------------------------------------- { @@ -126,6 +136,11 @@ GET my-index-000001/_search "_score" : 0.8630463, "_source" : { "my_field" : "quick brown fox jump lazy dog" + }, + "highlight": { + "my_field": [ + "quick brown fox jump lazy dog" + ] + } } ] diff --git a/docs/reference/migration/migrate_8_17.asciidoc b/docs/reference/migration/migrate_8_17.asciidoc index 15bc6431c60ba..d499f1ce30497 100644 --- a/docs/reference/migration/migrate_8_17.asciidoc +++ b/docs/reference/migration/migrate_8_17.asciidoc @@ -18,3 +18,54 @@ coming::[8.17.0] There are no breaking changes in {es} 8.17. + +[discrete] +[[deprecated-8.17]] +=== Deprecations + +The following functionality has been deprecated in {es} 8.17 +and will be removed in a future version. +While this won't have an immediate impact on your applications, +we strongly encourage you to take the described steps to update your code +after upgrading to 8.17. + +To find out if you are using any deprecated functionality, +enable <<deprecation-logging,deprecation logging>>. + +[discrete] +[[deprecations_817_mapping]] +==== Mapping deprecations + +[[deprecate_source_mode_in_mappings]] +.Deprecate `_source.mode` in mappings +[%collapsible] +==== +*Details* + +Configuring `_source.mode` in mappings is deprecated and will be removed in future versions. Use the `index.mapping.source.mode` index setting instead. + +*Impact* + +Use the `index.mapping.source.mode` index setting instead. +====
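To make the replacement concrete, a minimal sketch of the suggested setting at index-creation time (the index name is illustrative; `synthetic` is one of the supported modes, alongside `stored` and `disabled`):

[source,console]
----
PUT my-index-000002
{
  "settings": {
    "index.mapping.source.mode": "synthetic"
  }
}
----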
+ +[discrete] +[[deprecations_817_rest_api]] +==== REST API deprecations + +[[format_of_non_detailed_error_responses_changing_in_v9]] +.The format of non-detailed error responses is changing in v9 +[%collapsible] +==== +*Details* + +When an error occurs while processing a request, Elasticsearch returns information on that error in the REST response. +If `http.detailed_errors.enabled: false` is specified in node settings, the v8 REST API and below return a significantly reduced version of this response. +Starting with the v9 REST API, the JSON structure of error responses when the `http.detailed_errors.enabled: false` option is set +will be the same as when detailed errors are enabled (which is the default). +To keep using the existing format for non-detailed error responses, use the v8 REST API. + +*Impact* + +If you have set `http.detailed_errors.enabled: false` (the default is `true`), +the structure of the JSON returned when any exceptions occur will change with the v9 REST API. +To keep using the existing format, use the v8 REST API. +==== + diff --git a/docs/reference/release-notes/8.16.0.asciidoc b/docs/reference/release-notes/8.16.0.asciidoc index 88ae9f9e5b599..fd7ef963d9ff7 100644 --- a/docs/reference/release-notes/8.16.0.asciidoc +++ b/docs/reference/release-notes/8.16.0.asciidoc @@ -270,7 +270,6 @@ ES|QL:: * Push down filters even in case of renames in Evals {es-pull}114411[#114411] * Speed up CASE for some parameters {es-pull}112295[#112295] * Speed up grouping by bytes {es-pull}114021[#114021] -* Support INLINESTATS grouped on expressions {es-pull}111690[#111690] * Use less memory in listener {es-pull}114358[#114358] * Add support for cached strings in plan serialization {es-pull}112929[#112929] * Add Telemetry API and track top functions {es-pull}111226[#111226] @@ -462,7 +461,6 @@ ES|QL:: * Add boolean support to Max and Min aggs {es-pull}110527[#110527] * Add boolean support to TOP aggregation {es-pull}110718[#110718] * Added `mv_percentile` function {es-pull}111749[#111749] (issue: {es-issue}111591[#111591]) -* INLINESTATS {es-pull}109583[#109583] (issue: {es-issue}107589[#107589]) * Introduce per agg filter {es-pull}113735[#113735] * Strings support for MAX and MIN aggregations {es-pull}111544[#111544] * Support IP fields in MAX and MIN aggregations {es-pull}110921[#110921] diff --git a/docs/reference/release-notes/8.17.0.asciidoc b/docs/reference/release-notes/8.17.0.asciidoc index 59962fd83e9b7..9ddfd69c4343d 100644 --- a/docs/reference/release-notes/8.17.0.asciidoc +++ b/docs/reference/release-notes/8.17.0.asciidoc @@ -1,8 +1,204 @@ [[release-notes-8.17.0]] == {es} version 8.17.0 -coming[8.17.0] - Also see <<breaking-changes-8.17,Breaking changes in 8.17>>. +[[license-8.17.0]] +[float] +=== License changes + +[float] +==== Change to synthetic `_source` licensing + +Starting with this release, the <<synthetic-source,synthetic `_source`>> feature is available exclusively with the Enterprise subscription. Synthetic `_source` is used in logs data streams (`logsdb` index mode), time series data streams (TSDS, using `time_series` index mode), application performance monitoring (APM), and Universal Profiling. + +If you are using these capabilities and are not on an Enterprise license, the change will result in increased storage requirements for new data, as the synthetic `_source` setting will be ignored. Existing indices that used synthetic `_source` will remain seamlessly accessible. + +Refer to the subscription page for https://www.elastic.co/subscriptions/cloud[Elastic Cloud] and {subscriptions}[Elastic Stack/self-managed] for the breakdown of available features and their associated subscription tiers. For further details and subscription options, contact your Elastic sales representative or https://www.elastic.co/contact[contact us].
+ +[[bug-8.17.0]] +[float] +=== Bug fixes + +Analysis:: +* Adjust analyze limit exception to be a `bad_request` {es-pull}116325[#116325] + +CCS:: +* Fix long metric deserialize & add - auto-resize needs to be set manually {es-pull}117105[#117105] (issue: {es-issue}116914[#116914]) + +CRUD:: +* Standardize error code when bulk body is invalid {es-pull}114869[#114869] + +Data streams:: +* Acquire stats searcher for data stream stats {es-pull}117953[#117953] + +EQL:: +* Don't use a `BytesStreamOutput` to copy keys in `BytesRefBlockHash` {es-pull}114819[#114819] (issue: {es-issue}114599[#114599]) + +ES|QL:: +* Added stricter range type checks and runtime warnings for ENRICH {es-pull}115091[#115091] (issues: {es-issue}107357[#107357], {es-issue}116799[#116799]) +* Don't return TEXT type for functions that take TEXT {es-pull}114334[#114334] (issues: {es-issue}111537[#111537], {es-issue}114333[#114333]) +* ESQL: Fix sorts containing `_source` {es-pull}116980[#116980] (issue: {es-issue}116659[#116659]) +* ES|QL: Fix stats by constant expression {es-pull}114899[#114899] +* Fix BWC for ES|QL cluster request {es-pull}117865[#117865] +* Fix CCS exchange when multi cluster aliases point to same cluster {es-pull}117297[#117297] +* Fix COUNT filter pushdown {es-pull}117503[#117503] (issue: {es-issue}115522[#115522]) +* Fix NPE in `EnrichLookupService` on mixed clusters with <8.14 versions {es-pull}116583[#116583] (issues: {es-issue}116529[#116529], {es-issue}116544[#116544]) +* Fix stats by constant expresson with alias {es-pull}117551[#117551] +* Fix validation of SORT by aggregate functions {es-pull}117316[#117316] +* Fixing remote ENRICH by pushing the Enrich inside `FragmentExec` {es-pull}114665[#114665] (issue: {es-issue}105095[#105095]) +* Ignore cancellation exceptions {es-pull}117657[#117657] +* Limit size of `Literal#toString` {es-pull}117842[#117842] +* Opt into extra data stream resolution {es-pull}118378[#118378] +* Use `SearchStats` instead of field.isAggregatable in data node planning {es-pull}115744[#115744] (issue: {es-issue}115737[#115737]) +* [ESQL] Fix Binary Comparisons on Date Nanos {es-pull}116346[#116346] +* [ES|QL] To_DatePeriod and To_TimeDuration return better error messages on `union_type` fields {es-pull}114934[#114934] + +Infra/CLI:: +* Fix NPE on plugin sync {es-pull}115640[#115640] (issue: {es-issue}114818[#114818]) + +Ingest Node:: +* Fix enrich cache size setting name {es-pull}117575[#117575] +* Fix log message format bugs {es-pull}118354[#118354] +* Fix reconstituting version string from components {es-pull}117213[#117213] (issue: {es-issue}116950[#116950]) +* Reducing error-level stack trace logging for normal events in `GeoIpDownloader` {es-pull}114924[#114924] + +License:: +* Distinguish `LicensedFeature` by family field {es-pull}116809[#116809] + +Logs:: +* Prohibit changes to index mode, source, and sort settings during resize {es-pull}115812[#115812] + +Machine Learning:: +* Fix deberta tokenizer bug caused by bug in normalizer {es-pull}117189[#117189] +* Fix for Deberta tokenizer when input sequence exceeds 512 tokens {es-pull}117595[#117595] +* Hides `hugging_face_elser` service from the `GET _inference/_services API` {es-pull}116664[#116664] (issue: {es-issue}116644[#116644]) +* Mitigate IOSession timeouts {es-pull}115414[#115414] (issues: {es-issue}114385[#114385], {es-issue}114327[#114327], {es-issue}114105[#114105], {es-issue}114232[#114232]) +* Propagate scoring function through random sampler {es-pull}116957[#116957] (issue: {es-issue}110134[#110134]) +* 
Wait for the worker service to shutdown before closing task processor {es-pull}117920[#117920] (issue: {es-issue}117563[#117563]) + +Mapping:: +* Address mapping and compute engine runtime field issues {es-pull}117792[#117792] (issue: {es-issue}117644[#117644]) +* Always Emit Inference ID in Semantic Text Mapping {es-pull}117294[#117294] +* Fix concurrency issue with `ReinitializingSourceProvider` {es-pull}118370[#118370] (issue: {es-issue}118238[#118238]) +* Fix false positive date detection with trailing dot {es-pull}116953[#116953] (issue: {es-issue}116946[#116946]) +* Parse the contents of dynamic objects for [subobjects:false] {es-pull}117762[#117762] (issue: {es-issue}117544[#117544]) + +Network:: +* Use underlying `ByteBuf` `refCount` for `ReleasableBytesReference` {es-pull}116211[#116211] + +Ranking:: +* Fix for propagating filters from compound to inner retrievers {es-pull}117914[#117914] + +Search:: +* Add missing `async_search` query parameters to rest-api-spec {es-pull}117312[#117312] +* Don't skip shards in coord rewrite if timestamp is an alias {es-pull}117271[#117271] +* Fields caps does not honour ignore_unavailable {es-pull}116021[#116021] (issue: {es-issue}107767[#107767]) +* _validate does not honour ignore_unavailable {es-pull}116656[#116656] (issue: {es-issue}116594[#116594]) + +Vector Search:: +* Correct bit * byte and bit * float script comparisons {es-pull}117404[#117404] + +Watcher:: +* Watch Next Run Interval Resets On Shard Move or Node Restart {es-pull}115102[#115102] (issue: {es-issue}111433[#111433]) + +[[deprecation-8.17.0]] +[float] +=== Deprecations + +Infra/REST API:: +* Add a basic deprecation warning that the JSON format for non-detailed error responses is changing in v9 {es-pull}114739[#114739] (issue: {es-issue}89387[#89387]) + +Mapping:: +* Deprecate `_source.mode` in mappings {es-pull}116689[#116689] + +[[enhancement-8.17.0]] +[float] +=== Enhancements + +Authorization:: +* Add a `monitor_stats` privilege and allow that privilege for remote cluster privileges {es-pull}114964[#114964] + +Data streams:: +* Adding a deprecation info API warning for data streams with old indices {es-pull}116447[#116447] + +ES|QL:: +* Add ES|QL `bit_length` function {es-pull}115792[#115792] +* ESQL: Honor skip_unavailable setting for nonmatching indices errors at planning time {es-pull}116348[#116348] (issue: {es-issue}114531[#114531]) +* ESQL: Remove parent from `FieldAttribute` {es-pull}112881[#112881] +* ESQL: extract common filter from aggs {es-pull}115678[#115678] +* ESQL: optimise aggregations filtered by false/null into evals {es-pull}115858[#115858] +* ES|QL CCS uses `skip_unavailable` setting for handling disconnected remote clusters {es-pull}115266[#115266] (issue: {es-issue}114531[#114531]) +* ES|QL: add metrics for functions {es-pull}114620[#114620] +* Esql Enable Date Nanos (tech preview) {es-pull}117080[#117080] +* [ES|QL] Implicit casting string literal to intervals {es-pull}115814[#115814] (issue: {es-issue}115352[#115352]) + +Indices APIs:: +* Ensure class resource stream is closed in `ResourceUtils` {es-pull}116437[#116437] + +Inference:: +* Add version prefix to Inference Service API path {es-pull}117366[#117366] +* Update sparse text embeddings API route for Inference Service {es-pull}118368[#118368] + +Infra/Core:: +* Support for unsigned 64 bit numbers in Cpu stats {es-pull}114681[#114681] (issue: {es-issue}112274[#112274]) + +Ingest Node:: +* Adding support for additional mapping to simulate ingest API {es-pull}114742[#114742] +* Adding support for 
simulate ingest mapping adddition for indices with mappings that do not come from templates {es-pull}115359[#115359] + +Logs:: +* Add logsdb telemetry {es-pull}115994[#115994] +* Add num docs and size to logsdb telemetry {es-pull}116128[#116128] +* Feature: re-structure document ID generation favoring _id inverted index compression {es-pull}104683[#104683] + +Machine Learning:: +* Add special case for elastic reranker in inference API {es-pull}116962[#116962] +* Adding inference endpoint validation for `AzureAiStudioService` {es-pull}113713[#113713] +* Adds support for `input_type` field to Vertex inference service {es-pull}116431[#116431] +* Enable built-in Inference Endpoints and default for Semantic Text {es-pull}116931[#116931] +* Increase default `queue_capacity` to 10_000 and decrease max `queue_capacity` to 100_000 {es-pull}115041[#115041] +* [Inference API] Add API to get configuration of inference services {es-pull}114862[#114862] +* [Inference API] Improve chunked results error message {es-pull}115807[#115807] + +Recovery:: +* Attempt to clean up index before remote transfer {es-pull}115142[#115142] (issue: {es-issue}104473[#104473]) + +Relevance:: +* Add query rules retriever {es-pull}114855[#114855] + +Search:: +* Add Search Phase APM metrics {es-pull}113194[#113194] +* Add `docvalue_fields` Support for `dense_vector` Fields {es-pull}114484[#114484] (issue: {es-issue}108470[#108470]) +* Add initial support for `semantic_text` field type {es-pull}113920[#113920] +* Adds access to flags no_sub_matches and no_overlapping_matches to hyphenation-decompounder-tokenfilter {es-pull}115459[#115459] (issue: {es-issue}97849[#97849]) +* Better sizing `BytesRef` for Strings in Queries {es-pull}115655[#115655] +* Enable `_tier` based coordinator rewrites for all indices (not just mounted indices) {es-pull}115797[#115797] + +Vector Search:: +* Add support for bitwise inner-product in painless {es-pull}116082[#116082] +* Improve halfbyte transposition performance, marginally improving bbq performance {es-pull}117350[#117350] + +[[feature-8.17.0]] +[float] +=== New features + +Data streams:: +* Add default ILM policies and switch to ILM for apm-data plugin {es-pull}115687[#115687] + +ES|QL:: +* Add support for `BYTE_LENGTH` scalar function {es-pull}116591[#116591] +* Esql/lookup join grammar {es-pull}116515[#116515] +* Remove snapshot build restriction for match and qstr functions {es-pull}114482[#114482] + +Search:: +* ESQL - Add match operator (:) {es-pull}116819[#116819] + +[[upgrade-8.17.0]] +[float] +=== Upgrades + +Security:: +* Upgrade Bouncy Castle FIPS dependencies {es-pull}112989[#112989] + diff --git a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc index 5859ccd03e511..a68f20fb1c656 100644 --- a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc +++ b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc @@ -1,25 +1,26 @@ |==== -| 20+^h| Remote cluster version +| 21+^h| Remote cluster version h| Local cluster version - | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 | 8.6 | 8.7 | 8.8 | 8.9 | 8.10 | 8.11 | 8.12 | 8.13 | 8.14 | 8.15 | 8.16 -| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.1–7.16 | {yes-icon} | {yes-icon} | 
{yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.11 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | 
{yes-icon} | {yes-icon} -| 8.12 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.13 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.14 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.15 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.16 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} + | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 | 8.6 | 8.7 | 8.8 | 8.9 | 8.10 | 8.11 | 8.12 | 8.13 | 8.14 | 8.15 | 8.16 | 8.17 +| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | 
{yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.11 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.12 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.13 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.14 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.15 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.16 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.17 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} |==== diff --git 
a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc
index 0abc44c809d08..c2fcb88380f53 100644
--- a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc
+++ b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc
@@ -45,7 +45,7 @@ include::{es-ref-dir}/tab-widgets/inference-api/infer-api-task-widget.asciidoc[]
==== Create the index mapping
The mapping of the destination index - the index that contains the embeddings that the model will create based on your input text - must be created.
-The destination index must have a field with the <<dense-vector,`dense_vector`>> field type for most models and the <<sparse-vector,`sparse_vector`>> field type for the sparse vector models like in the case of the `elser` service to index the output of the used model.
+The destination index must have a field with the <<dense-vector,`dense_vector`>> field type for most models and the <<sparse-vector,`sparse_vector`>> field type for the sparse vector models like in the case of the `elasticsearch` service to index the output of the used model.
include::{es-ref-dir}/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc[]
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc
index eeecb4718658a..9e935f79aa0ac 100644
--- a/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc
+++ b/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc
@@ -8,7 +8,7 @@ the Cohere service.
// tag::elser[]
ELSER is a model trained by Elastic. If you have an {es} deployment, there is no
-further requirement for using the {infer} API with the `elser` service.
+further requirement for using the {infer} API with the `elasticsearch` service.
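
For the index-mapping passage above, a minimal sketch of what such a destination index could look like when created through the Java API client. Everything here is illustrative: the index name, field names, and the 384-dim size are hypothetical, and the dimension count must match the embedding size of whichever model the inference endpoint uses.

    import co.elastic.clients.elasticsearch.ElasticsearchClient;

    // Sketch: a destination index with a dense_vector field, as used by most models;
    // sparse models (e.g. ELSER via the `elasticsearch` service) would map the
    // embedding field as sparse_vector instead.
    void createDestinationIndex(ElasticsearchClient client) throws Exception {
        client.indices().create(c -> c
            .index("my-embeddings")                          // hypothetical index name
            .mappings(m -> m
                .properties("content", p -> p.text(t -> t))  // the raw input text
                .properties("content_embedding", p -> p      // hypothetical field name
                    .denseVector(d -> d.dims(384))           // must match the model's output dims
                )
            )
        );
    }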
// end::elser[]
diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml
index 6f89a2ecfce83..327e87b2783e4 100644
--- a/gradle/verification-metadata.xml
+++ b/gradle/verification-metadata.xml
@@ -2920,129 +2920,129 @@
[hunk body lost in extraction: the <component>/<artifact>/<sha256> checksum entries were reduced to bare "-"/"+" markers; the hunk replaces one set of dependency checksum entries with updated ones]
diff --git a/libs/entitlement/qa/build.gradle b/libs/entitlement/qa/build.gradle
index 86bafc34f4d00..7f46b2fe20a8a 100644
--- a/libs/entitlement/qa/build.gradle
+++ b/libs/entitlement/qa/build.gradle
@@ -13,8 +13,8 @@ apply plugin: 'elasticsearch.internal-test-artifact'
 dependencies {
   javaRestTestImplementation project(':libs:entitlement:qa:common')
-  clusterPlugins project(':libs:entitlement:qa:entitlement-allowed')
-  clusterPlugins project(':libs:entitlement:qa:entitlement-allowed-nonmodular')
+  clusterModules project(':libs:entitlement:qa:entitlement-allowed')
+  clusterModules project(':libs:entitlement:qa:entitlement-allowed-nonmodular')
   clusterPlugins project(':libs:entitlement:qa:entitlement-denied')
   clusterPlugins project(':libs:entitlement:qa:entitlement-denied-nonmodular')
 }
diff --git a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java
index 5135fff44531a..2fd4472f5cc65 100644
--- a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java
+++ b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java
@@ -28,8 +28,8 @@ public class EntitlementsAllowedIT extends ESRestTestCase {
     @ClassRule
     public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
-        .plugin("entitlement-allowed")
-        .plugin("entitlement-allowed-nonmodular")
+        .module("entitlement-allowed")
+        .module("entitlement-allowed-nonmodular")
         .systemProperty("es.entitlements.enabled", "true")
         .setting("xpack.security.enabled", "false")
         .build();
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java
index 01b8f4d574f90..2abfb11964a93 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java
@@ -15,7 +15,6 @@
 import com.sun.tools.attach.VirtualMachine;
 import org.elasticsearch.core.SuppressForbidden;
-import org.elasticsearch.core.Tuple;
 import org.elasticsearch.entitlement.initialization.EntitlementInitialization;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.logging.Logger;
@@ -29,7 +28,9 @@ public class EntitlementBootstrap {
-    public record BootstrapArgs(Collection<Tuple<Path, Boolean>> pluginData, Function<Class<?>, String> pluginResolver) {}
+    public record PluginData(Path pluginPath, boolean isModular, boolean isExternalPlugin) {}
+
+    public record BootstrapArgs(Collection<PluginData> pluginData, Function<Class<?>, String> pluginResolver) {}
     private static BootstrapArgs bootstrapArgs;
@@ -40,11 +41,11 @@ public static BootstrapArgs bootstrapArgs() {
     /**
      * Activates entitlement checking.
Once this method returns, calls to methods protected by Entitlements from classes without a valid
      * policy will throw {@link org.elasticsearch.entitlement.runtime.api.NotEntitledException}.
-     * @param pluginData a collection of (plugin path, boolean), that holds the paths of all the installed Elasticsearch modules and
-     *                   plugins, and whether they are Java modular or not.
+     * @param pluginData a collection of (plugin path, boolean, boolean), that holds the paths of all the installed Elasticsearch modules
+     *                   and plugins, whether they are Java modular or not, and whether they are Elasticsearch modules or external plugins.
      * @param pluginResolver a functor to map a Java Class to the plugin it belongs to (the plugin name).
      */
-    public static void bootstrap(Collection<Tuple<Path, Boolean>> pluginData, Function<Class<?>, String> pluginResolver) {
+    public static void bootstrap(Collection<PluginData> pluginData, Function<Class<?>, String> pluginResolver) {
         logger.debug("Loading entitlement agent");
         if (EntitlementBootstrap.bootstrapArgs != null) {
             throw new IllegalStateException("plugin data is already set");
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
index fb694308466c6..2956efa8eec31 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
@@ -9,7 +9,6 @@
 package org.elasticsearch.entitlement.initialization;
-import org.elasticsearch.core.Tuple;
 import org.elasticsearch.core.internal.provider.ProviderLocator;
 import org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap;
 import org.elasticsearch.entitlement.bridge.EntitlementChecker;
@@ -96,25 +95,25 @@ private static PolicyManager createPolicyManager() throws IOException {
         return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver());
     }
-    private static Map<String, Policy> createPluginPolicies(Collection<Tuple<Path, Boolean>> pluginData) throws IOException {
+    private static Map<String, Policy> createPluginPolicies(Collection<EntitlementBootstrap.PluginData> pluginData) throws IOException {
         Map<String, Policy> pluginPolicies = new HashMap<>(pluginData.size());
-        for (Tuple<Path, Boolean> entry : pluginData) {
-            Path pluginRoot = entry.v1();
-            boolean isModular = entry.v2();
-
+        for (var entry : pluginData) {
+            Path pluginRoot = entry.pluginPath();
             String pluginName = pluginRoot.getFileName().toString();
-            final Policy policy = loadPluginPolicy(pluginRoot, isModular, pluginName);
+
+            final Policy policy = loadPluginPolicy(pluginRoot, entry.isModular(), pluginName, entry.isExternalPlugin());
             pluginPolicies.put(pluginName, policy);
         }
         return pluginPolicies;
     }
-    private static Policy loadPluginPolicy(Path pluginRoot, boolean isModular, String pluginName) throws IOException {
+    private static Policy loadPluginPolicy(Path pluginRoot, boolean isModular, String pluginName, boolean isExternalPlugin)
+        throws IOException {
         Path policyFile = pluginRoot.resolve(POLICY_FILE_NAME);
         final Set<String> moduleNames = getModuleNames(pluginRoot, isModular);
-        final Policy policy = parsePolicyIfExists(pluginName, policyFile);
+        final Policy policy = parsePolicyIfExists(pluginName, policyFile, isExternalPlugin);
         // TODO: should this check actually be part of the parser?
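
    // ------------------------------------------------------------------------------------------
    // Editor's sketch (not part of the patch): how the new PluginData record is meant to be fed
    // into the bootstrap, replacing the old Tuple<Path, Boolean>. The paths and the
    // resolvePluginName helper are hypothetical; the record components and the bootstrap
    // signature come from the diff above.
    //
    //     List<EntitlementBootstrap.PluginData> pluginData = List.of(
    //         new EntitlementBootstrap.PluginData(Path.of("modules/x-pack-core"), true, false),  // bundled module
    //         new EntitlementBootstrap.PluginData(Path.of("plugins/my-plugin"), false, true)     // external plugin
    //     );
    //     EntitlementBootstrap.bootstrap(pluginData, clazz -> resolvePluginName(clazz));
    // ------------------------------------------------------------------------------------------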
for (Scope scope : policy.scopes) {
@@ -125,9 +124,9 @@ private static Policy loadPluginPolicy(Path pluginRoot, boolean isModular, Strin
         return policy;
     }
-    private static Policy parsePolicyIfExists(String pluginName, Path policyFile) throws IOException {
+    private static Policy parsePolicyIfExists(String pluginName, Path policyFile, boolean isExternalPlugin) throws IOException {
         if (Files.exists(policyFile)) {
-            return new PolicyParser(Files.newInputStream(policyFile, StandardOpenOption.READ), pluginName).parsePolicy();
+            return new PolicyParser(Files.newInputStream(policyFile, StandardOpenOption.READ), pluginName, isExternalPlugin).parsePolicy();
         }
         return new Policy(pluginName, List.of());
     }
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExternalEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExternalEntitlement.java
index bb1205696b49e..768babdb840f5 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExternalEntitlement.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExternalEntitlement.java
@@ -33,4 +33,12 @@
      * have to match the parameter names of the constructor.
      */
     String[] parameterNames() default {};
+
+    /**
+     * This flag indicates if this Entitlement can be used in external plugins,
+     * or if it can be used only in Elasticsearch modules ("internal" plugins).
+     * Using an entitlement that is {@code esModulesOnly} in an external
+     * plugin policy will throw an exception while parsing.
+     */
+    boolean esModulesOnly() default true;
 }
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java
index d0837bc096183..4fdbcc93ea6e0 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java
@@ -26,7 +26,7 @@ public class FileEntitlement implements Entitlement {
     private final String path;
     private final int actions;
-    @ExternalEntitlement(parameterNames = { "path", "actions" })
+    @ExternalEntitlement(parameterNames = { "path", "actions" }, esModulesOnly = false)
     public FileEntitlement(String path, List<String> actionsList) {
         this.path = path;
         int actionsInt = 0;
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
index a77c86d5ffd04..8d3efe4eb98e6 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
@@ -18,7 +18,6 @@
 import java.lang.module.ModuleFinder;
 import java.lang.module.ModuleReference;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.IdentityHashMap;
 import java.util.List;
@@ -56,8 +55,8 @@ public Stream getEntitlements(Class entitlementCla
     final Map<Module, ModuleEntitlements> moduleEntitlementsMap = new HashMap<>();
-    protected final Policy serverPolicy;
-    protected final Map<String, Policy> pluginPolicies;
+    protected final Map<String, List<Entitlement>> serverEntitlements;
+    protected final Map<String, Map<String, List<Entitlement>>> pluginsEntitlements;
     private final Function<Class<?>, String> pluginResolver;
     public static final String ALL_UNNAMED = "ALL-UNNAMED";
@@ -79,19 +78,16
@@ private static Set findSystemModules() {
     }
     public PolicyManager(Policy defaultPolicy, Map<String, Policy> pluginPolicies, Function<Class<?>, String> pluginResolver) {
-        this.serverPolicy = Objects.requireNonNull(defaultPolicy);
-        this.pluginPolicies = Collections.unmodifiableMap(Objects.requireNonNull(pluginPolicies));
+        this.serverEntitlements = buildScopeEntitlementsMap(Objects.requireNonNull(defaultPolicy));
+        this.pluginsEntitlements = Objects.requireNonNull(pluginPolicies)
+            .entrySet()
+            .stream()
+            .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> buildScopeEntitlementsMap(e.getValue())));
         this.pluginResolver = pluginResolver;
     }
-    private static List<Entitlement> lookupEntitlementsForModule(Policy policy, String moduleName) {
-        for (int i = 0; i < policy.scopes.size(); ++i) {
-            var scope = policy.scopes.get(i);
-            if (scope.name.equals(moduleName)) {
-                return scope.entitlements;
-            }
-        }
-        return null;
+    private static Map<String, List<Entitlement>> buildScopeEntitlementsMap(Policy policy) {
+        return policy.scopes.stream().collect(Collectors.toUnmodifiableMap(scope -> scope.name, scope -> scope.entitlements));
     }
     public void checkExitVM(Class<?> callerClass) {
@@ -141,21 +137,21 @@ ModuleEntitlements getEntitlementsOrThrow(Class callerClass, Module requestin
         if (isServerModule(requestingModule)) {
             var scopeName = requestingModule.getName();
-            return getModuleEntitlementsOrThrow(callerClass, requestingModule, serverPolicy, scopeName);
+            return getModuleEntitlementsOrThrow(callerClass, requestingModule, serverEntitlements, scopeName);
         }
         // plugins
         var pluginName = pluginResolver.apply(callerClass);
         if (pluginName != null) {
-            var pluginPolicy = pluginPolicies.get(pluginName);
-            if (pluginPolicy != null) {
+            var pluginEntitlements = pluginsEntitlements.get(pluginName);
+            if (pluginEntitlements != null) {
                 final String scopeName;
                 if (requestingModule.isNamed() == false) {
                     scopeName = ALL_UNNAMED;
                 } else {
                     scopeName = requestingModule.getName();
                 }
-                return getModuleEntitlementsOrThrow(callerClass, requestingModule, pluginPolicy, scopeName);
+                return getModuleEntitlementsOrThrow(callerClass, requestingModule, pluginEntitlements, scopeName);
             }
         }
@@ -167,15 +163,20 @@ private static String buildModuleNoPolicyMessage(Class callerClass, Module re
         return Strings.format("Missing entitlement policy: caller [%s], module [%s]", callerClass, requestingModule.getName());
     }
-    private ModuleEntitlements getModuleEntitlementsOrThrow(Class<?> callerClass, Module module, Policy policy, String moduleName) {
-        var entitlements = lookupEntitlementsForModule(policy, moduleName);
+    private ModuleEntitlements getModuleEntitlementsOrThrow(
+        Class<?> callerClass,
+        Module module,
+        Map<String, List<Entitlement>> scopeEntitlements,
+        String moduleName
+    ) {
+        var entitlements = scopeEntitlements.get(moduleName);
         if (entitlements == null) {
             // Module without entitlements - remember we don't have any
             moduleEntitlementsMap.put(module, ModuleEntitlements.NONE);
             throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, module));
         }
         // We have a policy for this module
-        var classEntitlements = createClassEntitlements(entitlements);
+        var classEntitlements = new ModuleEntitlements(entitlements);
         moduleEntitlementsMap.put(module, classEntitlements);
         return classEntitlements;
     }
@@ -184,10 +185,6 @@ private static boolean isServerModule(Module requestingModule) {
         return requestingModule.isNamed() && requestingModule.getLayer() == ModuleLayer.boot();
     }
-    private ModuleEntitlements createClassEntitlements(List<Entitlement> entitlements) {
-        return new ModuleEntitlements(entitlements);
-    }
-    private static
Module requestingModule(Class callerClass) { if (callerClass != null) { Module callerModule = callerClass.getModule(); @@ -222,6 +219,6 @@ private static boolean isTriviallyAllowed(Module requestingModule) { @Override public String toString() { - return "PolicyManager{" + "serverPolicy=" + serverPolicy + ", pluginPolicies=" + pluginPolicies + '}'; + return "PolicyManager{" + "serverEntitlements=" + serverEntitlements + ", pluginsEntitlements=" + pluginsEntitlements + '}'; } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java index 0d1a7c14ece4b..fb63d5ffbeb48 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java @@ -39,6 +39,7 @@ public class PolicyParser { protected final XContentParser policyParser; protected final String policyName; + private final boolean isExternalPlugin; static String getEntitlementTypeName(Class entitlementClass) { var entitlementClassName = entitlementClass.getSimpleName(); @@ -56,9 +57,10 @@ static String getEntitlementTypeName(Class entitlementCla .collect(Collectors.joining("_")); } - public PolicyParser(InputStream inputStream, String policyName) throws IOException { + public PolicyParser(InputStream inputStream, String policyName, boolean isExternalPlugin) throws IOException { this.policyParser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, Objects.requireNonNull(inputStream)); this.policyName = policyName; + this.isExternalPlugin = isExternalPlugin; } public Policy parsePolicy() { @@ -125,6 +127,10 @@ protected Entitlement parseEntitlement(String scopeName, String entitlementType) throw newPolicyParserException(scopeName, "unknown entitlement type [" + entitlementType + "]"); } + if (entitlementMetadata.esModulesOnly() && isExternalPlugin) { + throw newPolicyParserException("entitlement type [" + entitlementType + "] is allowed only on modules"); + } + Class[] parameterTypes = entitlementConstructor.getParameterTypes(); String[] parametersNames = entitlementMetadata.parameterNames(); diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java index 7eb2b1fb476b3..dfcc5d8916f2c 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java @@ -19,7 +19,7 @@ public class PolicyParserFailureTests extends ESTestCase { public void testParserSyntaxFailures() { PolicyParserException ppe = expectThrows( PolicyParserException.class, - () -> new PolicyParser(new ByteArrayInputStream("[]".getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml") + () -> new PolicyParser(new ByteArrayInputStream("[]".getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml", false) .parsePolicy() ); assertEquals("[1:1] policy parsing error for [test-failure-policy.yaml]: expected object ", ppe.getMessage()); @@ -29,7 +29,7 @@ public void testEntitlementDoesNotExist() { PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - 
does_not_exist: {} - """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml").parsePolicy()); + """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml", false).parsePolicy()); assertEquals( "[2:5] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name]: " + "unknown entitlement type [does_not_exist]", @@ -41,7 +41,7 @@ public void testEntitlementMissingParameter() { PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - file: {} - """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml").parsePolicy()); + """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml", false).parsePolicy()); assertEquals( "[2:12] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name] " + "for entitlement type [file]: missing entitlement parameter [path]", @@ -52,7 +52,7 @@ public void testEntitlementMissingParameter() { entitlement-module-name: - file: path: test-path - """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml").parsePolicy()); + """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml", false).parsePolicy()); assertEquals( "[4:1] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name] " + "for entitlement type [file]: missing entitlement parameter [actions]", @@ -68,11 +68,22 @@ public void testEntitlementExtraneousParameter() { actions: - read extra: test - """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml").parsePolicy()); + """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml", false).parsePolicy()); assertEquals( "[7:1] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name] " + "for entitlement type [file]: extraneous entitlement parameter(s) {extra=test}", ppe.getMessage() ); } + + public void testEntitlementIsNotForExternalPlugins() { + PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" + entitlement-module-name: + - create_class_loader + """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml", true).parsePolicy()); + assertEquals( + "[2:5] policy parsing error for [test-failure-policy.yaml]: entitlement type [create_class_loader] is allowed only on modules", + ppe.getMessage() + ); + } } diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java index a514cfe418895..633c76cb8c04f 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java @@ -37,7 +37,17 @@ public void testGetEntitlementTypeName() { } public void testPolicyBuilder() throws IOException { - Policy parsedPolicy = new PolicyParser(PolicyParserTests.class.getResourceAsStream("test-policy.yaml"), "test-policy.yaml") + Policy parsedPolicy = new PolicyParser(PolicyParserTests.class.getResourceAsStream("test-policy.yaml"), "test-policy.yaml", false) + .parsePolicy(); + Policy builtPolicy = new Policy( + "test-policy.yaml", + List.of(new Scope("entitlement-module-name", List.of(new FileEntitlement("test/path/to/file", List.of("read", "write"))))) + ); + assertEquals(parsedPolicy, builtPolicy); + } + + public void testPolicyBuilderOnExternalPlugin() 
throws IOException { + Policy parsedPolicy = new PolicyParser(PolicyParserTests.class.getResourceAsStream("test-policy.yaml"), "test-policy.yaml", true) .parsePolicy(); Policy builtPolicy = new Policy( "test-policy.yaml", @@ -50,7 +60,7 @@ public void testParseCreateClassloader() throws IOException { Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - create_class_loader - """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml").parsePolicy(); + """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml", false).parsePolicy(); Policy builtPolicy = new Policy( "test-policy.yaml", List.of(new Scope("entitlement-module-name", List.of(new CreateClassLoaderEntitlement()))) diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitor.java b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitor.java new file mode 100644 index 0000000000000..eee4a62c7d588 --- /dev/null +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitor.java @@ -0,0 +1,356 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.geometry.utils; + +import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.GeometryCollection; +import org.elasticsearch.geometry.GeometryVisitor; +import org.elasticsearch.geometry.Line; +import org.elasticsearch.geometry.LinearRing; +import org.elasticsearch.geometry.MultiLine; +import org.elasticsearch.geometry.MultiPoint; +import org.elasticsearch.geometry.MultiPolygon; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.Polygon; +import org.elasticsearch.geometry.Rectangle; + +import java.util.Locale; +import java.util.Optional; + +/** + * This visitor is designed to determine the spatial envelope (or BBOX or MBR) of a potentially complex geometry. + * It has two modes: + *
+ * <ul>
+ * <li>
+ * Cartesian mode: The envelope is determined by the minimum and maximum x/y coordinates.
+ * Incoming BBOX geometries with minX > maxX are treated as invalid.
+ * Resulting BBOX geometries will always have minX <= maxX.
+ * </li>
+ * <li>
+ * Geographic mode: The envelope is determined by the minimum and maximum x/y coordinates,
+ * considering the possibility of wrapping the longitude around the dateline.
+ * A bounding box can be determined either by wrapping the longitude around the dateline or not,
+ * and the smaller bounding box is chosen. It is possible to disable the wrapping of the longitude.
+ * </li>
+ * </ul>
+ * Usage of this is as simple as:
+ * <pre>
+ * Optional<Rectangle> bbox = SpatialEnvelopeVisitor.visitCartesian(geometry);
+ * if (bbox.isPresent()) {
+ *     Rectangle envelope = bbox.get();
+ *     // Do stuff with the envelope
+ * }
+ * </pre>
+ * It is also possible to create the inner PointVisitor separately, as well as use the visitor for multiple geometries.
+ * <pre>
+ * PointVisitor pointVisitor = new CartesianPointVisitor();
+ * SpatialEnvelopeVisitor visitor = new SpatialEnvelopeVisitor(pointVisitor);
+ * for (Geometry geometry : geometries) {
+ *     geometry.visit(visitor);
+ * }
+ * if (visitor.isValid()) {
+ *     Rectangle envelope = visitor.getResult();
+ *     // Do stuff with the envelope
+ * }
+ * </pre>
+ * Code that wishes to modify the behaviour of the visitor can implement the PointVisitor interface,
+ * or extend the existing implementations.
+ */
+public class SpatialEnvelopeVisitor implements GeometryVisitor<Boolean, RuntimeException> {
+
+    private final PointVisitor pointVisitor;
+
+    public SpatialEnvelopeVisitor(PointVisitor pointVisitor) {
+        this.pointVisitor = pointVisitor;
+    }
+
+    /**
+     * Determine the BBOX without considering the CRS or wrapping of the longitude.
+     * Note that incoming BBOXes that do cross the dateline (minx>maxx) will be treated as invalid.
+     */
+    public static Optional<Rectangle> visitCartesian(Geometry geometry) {
+        var visitor = new SpatialEnvelopeVisitor(new CartesianPointVisitor());
+        if (geometry.visit(visitor)) {
+            return Optional.of(visitor.getResult());
+        }
+        return Optional.empty();
+    }
+
+    /**
+     * Determine the BBOX assuming the CRS is geographic (e.g. WGS84) and optionally wrapping the longitude around the dateline.
+     */
+    public static Optional<Rectangle> visitGeo(Geometry geometry, boolean wrapLongitude) {
+        var visitor = new SpatialEnvelopeVisitor(new GeoPointVisitor(wrapLongitude));
+        if (geometry.visit(visitor)) {
+            return Optional.of(visitor.getResult());
+        }
+        return Optional.empty();
+    }
+
+    public Rectangle getResult() {
+        return pointVisitor.getResult();
+    }
+
+    /**
+     * Visitor for visiting points and rectangles. This is where the actual envelope calculation happens.
+     * There are two implementations, one for cartesian coordinates and one for geographic coordinates.
+     * The latter can optionally wrap the longitude around the dateline.
+     */
+    public interface PointVisitor {
+        void visitPoint(double x, double y);
+
+        void visitRectangle(double minX, double maxX, double maxY, double minY);
+
+        boolean isValid();
+
+        Rectangle getResult();
+    }
+
+    /**
+     * The cartesian point visitor determines the envelope by the minimum and maximum x/y coordinates.
+     * It also disallows invalid rectangles where minX > maxX.
+ */ + public static class CartesianPointVisitor implements PointVisitor { + private double minX = Double.POSITIVE_INFINITY; + private double minY = Double.POSITIVE_INFINITY; + private double maxX = Double.NEGATIVE_INFINITY; + private double maxY = Double.NEGATIVE_INFINITY; + + public double getMinX() { + return minX; + } + + public double getMinY() { + return minY; + } + + public double getMaxX() { + return maxX; + } + + public double getMaxY() { + return maxY; + } + + @Override + public void visitPoint(double x, double y) { + minX = Math.min(minX, x); + minY = Math.min(minY, y); + maxX = Math.max(maxX, x); + maxY = Math.max(maxY, y); + } + + @Override + public void visitRectangle(double minX, double maxX, double maxY, double minY) { + if (minX > maxX) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "Invalid cartesian rectangle: minX (%s) > maxX (%s)", minX, maxX) + ); + } + this.minX = Math.min(this.minX, minX); + this.minY = Math.min(this.minY, minY); + this.maxX = Math.max(this.maxX, maxX); + this.maxY = Math.max(this.maxY, maxY); + } + + @Override + public boolean isValid() { + return minY != Double.POSITIVE_INFINITY; + } + + @Override + public Rectangle getResult() { + return new Rectangle(minX, maxX, maxY, minY); + } + } + + /** + * The geographic point visitor determines the envelope by the minimum and maximum x/y coordinates, + * while allowing for wrapping the longitude around the dateline. + * When longitude wrapping is enabled, the visitor will determine the smallest bounding box between the two choices: + *
+ * <ul>
+ * <li>Wrapping around the front of the earth, in which case the result will have minx < maxx</li>
+ * <li>Wrapping around the back of the earth, crossing the dateline, in which case the result will have minx > maxx</li>
+ * </ul>
+ */ + public static class GeoPointVisitor implements PointVisitor { + private double minY = Double.POSITIVE_INFINITY; + private double maxY = Double.NEGATIVE_INFINITY; + private double minNegX = Double.POSITIVE_INFINITY; + private double maxNegX = Double.NEGATIVE_INFINITY; + private double minPosX = Double.POSITIVE_INFINITY; + private double maxPosX = Double.NEGATIVE_INFINITY; + + public double getMinY() { + return minY; + } + + public double getMaxY() { + return maxY; + } + + public double getMinNegX() { + return minNegX; + } + + public double getMaxNegX() { + return maxNegX; + } + + public double getMinPosX() { + return minPosX; + } + + public double getMaxPosX() { + return maxPosX; + } + + private final boolean wrapLongitude; + + public GeoPointVisitor(boolean wrapLongitude) { + this.wrapLongitude = wrapLongitude; + } + + @Override + public void visitPoint(double x, double y) { + minY = Math.min(minY, y); + maxY = Math.max(maxY, y); + visitLongitude(x); + } + + @Override + public void visitRectangle(double minX, double maxX, double maxY, double minY) { + this.minY = Math.min(this.minY, minY); + this.maxY = Math.max(this.maxY, maxY); + visitLongitude(minX); + visitLongitude(maxX); + } + + private void visitLongitude(double x) { + if (x >= 0) { + minPosX = Math.min(minPosX, x); + maxPosX = Math.max(maxPosX, x); + } else { + minNegX = Math.min(minNegX, x); + maxNegX = Math.max(maxNegX, x); + } + } + + @Override + public boolean isValid() { + return minY != Double.POSITIVE_INFINITY; + } + + @Override + public Rectangle getResult() { + return getResult(minNegX, minPosX, maxNegX, maxPosX, maxY, minY, wrapLongitude); + } + + private static Rectangle getResult( + double minNegX, + double minPosX, + double maxNegX, + double maxPosX, + double maxY, + double minY, + boolean wrapLongitude + ) { + assert Double.isFinite(maxY); + if (Double.isInfinite(minPosX)) { + return new Rectangle(minNegX, maxNegX, maxY, minY); + } else if (Double.isInfinite(minNegX)) { + return new Rectangle(minPosX, maxPosX, maxY, minY); + } else if (wrapLongitude) { + double unwrappedWidth = maxPosX - minNegX; + double wrappedWidth = (180 - minPosX) - (-180 - maxNegX); + if (unwrappedWidth <= wrappedWidth) { + return new Rectangle(minNegX, maxPosX, maxY, minY); + } else { + return new Rectangle(minPosX, maxNegX, maxY, minY); + } + } else { + return new Rectangle(minNegX, maxPosX, maxY, minY); + } + } + } + + private boolean isValid() { + return pointVisitor.isValid(); + } + + @Override + public Boolean visit(Circle circle) throws RuntimeException { + // TODO: Support circle, if given CRS (needed for radius to x/y coordinate transformation) + throw new UnsupportedOperationException("Circle is not supported"); + } + + @Override + public Boolean visit(GeometryCollection collection) throws RuntimeException { + collection.forEach(geometry -> geometry.visit(this)); + return isValid(); + } + + @Override + public Boolean visit(Line line) throws RuntimeException { + for (int i = 0; i < line.length(); i++) { + pointVisitor.visitPoint(line.getX(i), line.getY(i)); + } + return isValid(); + } + + @Override + public Boolean visit(LinearRing ring) throws RuntimeException { + for (int i = 0; i < ring.length(); i++) { + pointVisitor.visitPoint(ring.getX(i), ring.getY(i)); + } + return isValid(); + } + + @Override + public Boolean visit(MultiLine multiLine) throws RuntimeException { + multiLine.forEach(line -> line.visit(this)); + return isValid(); + } + + @Override + public Boolean visit(MultiPoint multiPoint) throws RuntimeException { + for 
(int i = 0; i < multiPoint.size(); i++) { + visit(multiPoint.get(i)); + } + return isValid(); + } + + @Override + public Boolean visit(MultiPolygon multiPolygon) throws RuntimeException { + multiPolygon.forEach(polygon -> polygon.visit(this)); + return isValid(); + } + + @Override + public Boolean visit(Point point) throws RuntimeException { + pointVisitor.visitPoint(point.getX(), point.getY()); + return isValid(); + } + + @Override + public Boolean visit(Polygon polygon) throws RuntimeException { + visit(polygon.getPolygon()); + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + visit(polygon.getHole(i)); + } + return isValid(); + } + + @Override + public Boolean visit(Rectangle rectangle) throws RuntimeException { + pointVisitor.visitRectangle(rectangle.getMinX(), rectangle.getMaxX(), rectangle.getMaxY(), rectangle.getMinY()); + return isValid(); + } +} diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitorTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitorTests.java new file mode 100644 index 0000000000000..fc35df295e566 --- /dev/null +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitorTests.java @@ -0,0 +1,194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.geometry.utils; + +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.Rectangle; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class SpatialEnvelopeVisitorTests extends ESTestCase { + + public void testVisitCartesianShape() { + for (int i = 0; i < 1000; i++) { + var geometry = ShapeTestUtils.randomGeometryWithoutCircle(0, false); + var bbox = SpatialEnvelopeVisitor.visitCartesian(geometry); + assertNotNull(bbox); + assertTrue(i + ": " + geometry, bbox.isPresent()); + var result = bbox.get(); + assertThat(i + ": " + geometry, result.getMinX(), lessThanOrEqualTo(result.getMaxX())); + assertThat(i + ": " + geometry, result.getMinY(), lessThanOrEqualTo(result.getMaxY())); + } + } + + public void testVisitGeoShapeNoWrap() { + for (int i = 0; i < 1000; i++) { + var geometry = GeometryTestUtils.randomGeometryWithoutCircle(0, false); + var bbox = SpatialEnvelopeVisitor.visitGeo(geometry, false); + assertNotNull(bbox); + assertTrue(i + ": " + geometry, bbox.isPresent()); + var result = bbox.get(); + assertThat(i + ": " + geometry, result.getMinX(), lessThanOrEqualTo(result.getMaxX())); + assertThat(i + ": " + geometry, result.getMinY(), lessThanOrEqualTo(result.getMaxY())); + } + } + + public void testVisitGeoShapeWrap() { + for (int i = 0; i < 1000; i++) { + var geometry = GeometryTestUtils.randomGeometryWithoutCircle(0, true); + var bbox = SpatialEnvelopeVisitor.visitGeo(geometry, false); + assertNotNull(bbox); + assertTrue(i + ": " + geometry, bbox.isPresent()); + var result = bbox.get(); + 
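
            // --------------------------------------------------------------------------------
            // Editor's worked example (not part of the patch): how GeoPointVisitor.getResult()
            // chooses between the wrapped and unwrapped envelope. For two points at longitudes
            // -170 and 170 (so minNegX = maxNegX = -170 and minPosX = maxPosX = 170):
            //     double unwrappedWidth = 170 - (-170);              // 340 degrees around the front
            //     double wrappedWidth = (180 - 170) - (-180 - -170); // 10 - (-10) = 20 degrees across the dateline
            // wrappedWidth is smaller, so the envelope is Rectangle(minX=170, maxX=-170, ...),
            // i.e. it crosses the dateline with minX > maxX, as testVisitGeoPointsWrapping and
            // testWillCrossDateline below exercise.
            // --------------------------------------------------------------------------------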
assertThat(i + ": " + geometry, result.getMinX(), lessThanOrEqualTo(result.getMaxX())); + assertThat(i + ": " + geometry, result.getMinY(), lessThanOrEqualTo(result.getMaxY())); + } + } + + public void testVisitCartesianPoints() { + var visitor = new SpatialEnvelopeVisitor(new SpatialEnvelopeVisitor.CartesianPointVisitor()); + double minX = Double.MAX_VALUE; + double minY = Double.MAX_VALUE; + double maxX = -Double.MAX_VALUE; + double maxY = -Double.MAX_VALUE; + for (int i = 0; i < 1000; i++) { + var x = randomFloat(); + var y = randomFloat(); + var point = new Point(x, y); + visitor.visit(point); + minX = Math.min(minX, x); + minY = Math.min(minY, y); + maxX = Math.max(maxX, x); + maxY = Math.max(maxY, y); + var result = visitor.getResult(); + assertThat(i + ": " + point, result.getMinX(), equalTo(minX)); + assertThat(i + ": " + point, result.getMinY(), equalTo(minY)); + assertThat(i + ": " + point, result.getMaxX(), equalTo(maxX)); + assertThat(i + ": " + point, result.getMaxY(), equalTo(maxY)); + } + } + + public void testVisitGeoPointsNoWrapping() { + var visitor = new SpatialEnvelopeVisitor(new SpatialEnvelopeVisitor.GeoPointVisitor(false)); + double minY = Double.MAX_VALUE; + double maxY = -Double.MAX_VALUE; + double minX = Double.MAX_VALUE; + double maxX = -Double.MAX_VALUE; + for (int i = 0; i < 1000; i++) { + var point = GeometryTestUtils.randomPoint(); + visitor.visit(point); + minY = Math.min(minY, point.getY()); + maxY = Math.max(maxY, point.getY()); + minX = Math.min(minX, point.getX()); + maxX = Math.max(maxX, point.getX()); + var result = visitor.getResult(); + assertThat(i + ": " + point, result.getMinX(), lessThanOrEqualTo(result.getMaxX())); + assertThat(i + ": " + point, result.getMinX(), equalTo(minX)); + assertThat(i + ": " + point, result.getMinY(), equalTo(minY)); + assertThat(i + ": " + point, result.getMaxX(), equalTo(maxX)); + assertThat(i + ": " + point, result.getMaxY(), equalTo(maxY)); + } + } + + public void testVisitGeoPointsWrapping() { + var visitor = new SpatialEnvelopeVisitor(new SpatialEnvelopeVisitor.GeoPointVisitor(true)); + double minY = Double.POSITIVE_INFINITY; + double maxY = Double.NEGATIVE_INFINITY; + double minNegX = Double.POSITIVE_INFINITY; + double maxNegX = Double.NEGATIVE_INFINITY; + double minPosX = Double.POSITIVE_INFINITY; + double maxPosX = Double.NEGATIVE_INFINITY; + for (int i = 0; i < 1000; i++) { + var point = GeometryTestUtils.randomPoint(); + visitor.visit(point); + minY = Math.min(minY, point.getY()); + maxY = Math.max(maxY, point.getY()); + if (point.getX() >= 0) { + minPosX = Math.min(minPosX, point.getX()); + maxPosX = Math.max(maxPosX, point.getX()); + } else { + minNegX = Math.min(minNegX, point.getX()); + maxNegX = Math.max(maxNegX, point.getX()); + } + var result = visitor.getResult(); + if (Double.isInfinite(minPosX)) { + // Only negative x values were considered + assertRectangleResult(i + ": " + point, result, minNegX, maxNegX, maxY, minY, false); + } else if (Double.isInfinite(minNegX)) { + // Only positive x values were considered + assertRectangleResult(i + ": " + point, result, minPosX, maxPosX, maxY, minY, false); + } else { + // Both positive and negative x values exist, we need to decide which way to wrap the bbox + double unwrappedWidth = maxPosX - minNegX; + double wrappedWidth = (180 - minPosX) - (-180 - maxNegX); + if (unwrappedWidth <= wrappedWidth) { + // The smaller bbox is around the front of the planet, no dateline wrapping required + assertRectangleResult(i + ": " + point, result, minNegX, maxPosX, 
maxY, minY, false);
+            } else {
+                // The smaller bbox is around the back of the planet, dateline wrapping required (minx > maxx)
+                assertRectangleResult(i + ": " + point, result, minPosX, maxNegX, maxY, minY, true);
+            }
+        }
+    }
+
+    public void testWillCrossDateline() {
+        var visitor = new SpatialEnvelopeVisitor(new SpatialEnvelopeVisitor.GeoPointVisitor(true));
+        visitor.visit(new Point(-90.0, 0.0));
+        visitor.visit(new Point(90.0, 0.0));
+        assertCrossesDateline(visitor, false);
+        visitor.visit(new Point(-89.0, 0.0));
+        visitor.visit(new Point(89.0, 0.0));
+        assertCrossesDateline(visitor, false);
+        visitor.visit(new Point(-100.0, 0.0));
+        visitor.visit(new Point(100.0, 0.0));
+        assertCrossesDateline(visitor, true);
+        visitor.visit(new Point(-70.0, 0.0));
+        visitor.visit(new Point(70.0, 0.0));
+        assertCrossesDateline(visitor, false);
+        visitor.visit(new Point(-120.0, 0.0));
+        visitor.visit(new Point(120.0, 0.0));
+        assertCrossesDateline(visitor, true);
+    }
+
+    private void assertCrossesDateline(SpatialEnvelopeVisitor visitor, boolean crossesDateline) {
+        var result = visitor.getResult();
+        if (crossesDateline) {
+            assertThat("Crosses dateline, minx>maxx", result.getMinX(), greaterThanOrEqualTo(result.getMaxX()));
+        } else {
+            assertThat("Does not cross dateline, minx<maxx", result.getMinX(), lessThanOrEqualTo(result.getMaxX()));
+        }
+    }
+}
[diff header for modules/aggregations/build.gradle lost in extraction; the hunk below edits its yamlRestTestV7CompatTransform block]
   // Something has changed with response codes
   task.skipTest("search.aggregation/20_terms/IP test", "Hybrid t-digest produces different results.")
+  // Maths changed
+  task.skipTest("aggregations/moving_fn/linearWeightedAvg", "math was wrong in previous versions")
   task.addAllowedWarningRegex("\\[types removal\\].*")
 }
diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/moving_fn.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/moving_fn.yml
index cd6feb601b1df..3abad87d57907 100644
--- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/moving_fn.yml
+++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/moving_fn.yml
@@ -255,6 +255,17 @@ linearWeightedAvg:
   - skip:
       features: close_to
+  - requires:
+      test_runner_features: [capabilities]
+
+  - requires:
+      capabilities:
+        - method: POST
+          path: /_search
+          parameters: [method, path, parameters, capabilities]
+          capabilities: [moving_fn_right_math]
+      reason: "math not fixed yet"
+
   - do:
       search:
         index: no_gaps
@@ -275,11 +286,11 @@ linearWeightedAvg:
   - match: { hits.total.value: 6 }
   - length: { aggregations.@timestamp.buckets: 6 }
   - is_false: aggregations.@timestamp.buckets.0.d.value
-  - close_to: { aggregations.@timestamp.buckets.1.d.value: { value: 0.500, error: 0.0005 } }
-  - close_to: { aggregations.@timestamp.buckets.2.d.value: { value: 1.250, error: 0.0005 } }
-  - close_to: { aggregations.@timestamp.buckets.3.d.value: { value: 1.000, error: 0.0005 } }
-  - close_to: { aggregations.@timestamp.buckets.4.d.value: { value: 2.250, error: 0.0005 } }
-  - close_to: { aggregations.@timestamp.buckets.5.d.value: { value: 3.500, error: 0.0005 } }
+  - close_to: { aggregations.@timestamp.buckets.1.d.value: { value: 1.000, error: 0.0005 } }
+  - close_to: { aggregations.@timestamp.buckets.2.d.value: { value: 1.667, error: 0.0005 } }
+  - close_to: { aggregations.@timestamp.buckets.3.d.value: { value: 1.333, error: 0.0005 } }
+  - close_to: { aggregations.@timestamp.buckets.4.d.value: { value: 3.000, error: 0.0005 } }
+  - close_to: { aggregations.@timestamp.buckets.5.d.value: { value: 4.667, error: 0.0005 } }
   - do:
       search:
@@ -301,11 +312,11 @@
linearWeightedAvg: - match: { hits.total.value: 6 } - length: { aggregations.@timestamp.buckets: 6 } - is_false: aggregations.@timestamp.buckets.0.d.value - - close_to: { aggregations.@timestamp.buckets.1.d.value: { value: 0.500, error: 0.0005 } } - - close_to: { aggregations.@timestamp.buckets.2.d.value: { value: 1.250, error: 0.0005 } } - - close_to: { aggregations.@timestamp.buckets.3.d.value: { value: 1.143, error: 0.0005 } } - - close_to: { aggregations.@timestamp.buckets.4.d.value: { value: 2.286, error: 0.0005 } } - - close_to: { aggregations.@timestamp.buckets.5.d.value: { value: 3.429, error: 0.0005 } } + - close_to: { aggregations.@timestamp.buckets.1.d.value: { value: 1.000, error: 0.0005 } } + - close_to: { aggregations.@timestamp.buckets.2.d.value: { value: 1.667, error: 0.0005 } } + - close_to: { aggregations.@timestamp.buckets.3.d.value: { value: 1.333, error: 0.0005 } } + - close_to: { aggregations.@timestamp.buckets.4.d.value: { value: 2.667, error: 0.0005 } } + - close_to: { aggregations.@timestamp.buckets.5.d.value: { value: 4.000, error: 0.0005 } } --- ewma: diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java index a79ae4de7cc66..4f0bf83000642 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java @@ -10,6 +10,7 @@ package org.elasticsearch.repositories.s3; import fixture.aws.imds.Ec2ImdsHttpFixture; +import fixture.aws.imds.Ec2ImdsServiceBuilder; import fixture.aws.imds.Ec2ImdsVersion; import fixture.s3.DynamicS3Credentials; import fixture.s3.S3HttpFixture; @@ -37,9 +38,8 @@ public class RepositoryS3EcsCredentialsRestIT extends AbstractRepositoryS3RestTe private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( - Ec2ImdsVersion.V1, - dynamicS3Credentials::addValidCredentials, - Set.of("/ecs_credentials_endpoint") + new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(dynamicS3Credentials::addValidCredentials) + .alternativeCredentialsEndpoints(Set.of("/ecs_credentials_endpoint")) ); private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java index ead91981b3fa8..dcdf52e963eef 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java @@ -10,6 +10,7 @@ package org.elasticsearch.repositories.s3; import fixture.aws.imds.Ec2ImdsHttpFixture; +import fixture.aws.imds.Ec2ImdsServiceBuilder; import fixture.aws.imds.Ec2ImdsVersion; import fixture.s3.DynamicS3Credentials; import fixture.s3.S3HttpFixture; @@ -23,8 +24,6 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; -import java.util.Set; - @ThreadLeakFilters(filters = { 
TestContainersThreadFilter.class }) @ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 public class RepositoryS3ImdsV1CredentialsRestIT extends AbstractRepositoryS3RestTestCase { @@ -37,9 +36,7 @@ public class RepositoryS3ImdsV1CredentialsRestIT extends AbstractRepositoryS3Res private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( - Ec2ImdsVersion.V1, - dynamicS3Credentials::addValidCredentials, - Set.of() + new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(dynamicS3Credentials::addValidCredentials) ); private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java index 67adb096bd1ba..434fc9720fc29 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java @@ -10,6 +10,7 @@ package org.elasticsearch.repositories.s3; import fixture.aws.imds.Ec2ImdsHttpFixture; +import fixture.aws.imds.Ec2ImdsServiceBuilder; import fixture.aws.imds.Ec2ImdsVersion; import fixture.s3.DynamicS3Credentials; import fixture.s3.S3HttpFixture; @@ -23,8 +24,6 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; -import java.util.Set; - @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) @ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 public class RepositoryS3ImdsV2CredentialsRestIT extends AbstractRepositoryS3RestTestCase { @@ -37,9 +36,7 @@ public class RepositoryS3ImdsV2CredentialsRestIT extends AbstractRepositoryS3Res private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( - Ec2ImdsVersion.V2, - dynamicS3Credentials::addValidCredentials, - Set.of() + new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V2).newCredentialsConsumer(dynamicS3Credentials::addValidCredentials) ); private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); diff --git a/muted-tests.yml b/muted-tests.yml index 85ea63928dc69..240d9d245eee5 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -153,12 +153,6 @@ tests: - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testCantCreateJobWithSameID issue: https://github.com/elastic/elasticsearch/issues/113581 -- class: org.elasticsearch.integration.KibanaUserRoleIntegTests - method: testFieldMappings - issue: https://github.com/elastic/elasticsearch/issues/113592 -- class: org.elasticsearch.integration.KibanaUserRoleIntegTests - method: testSearchAndMSearch - issue: https://github.com/elastic/elasticsearch/issues/113593 - class: org.elasticsearch.xpack.transform.integration.TransformIT method: testStopWaitForCheckpoint issue: https://github.com/elastic/elasticsearch/issues/106113 @@ -305,15 +299,6 @@ tests: - class: org.elasticsearch.search.basic.SearchWithRandomIOExceptionsIT method: testRandomDirectoryIOExceptions issue: 
https://github.com/elastic/elasticsearch/issues/114824 -- class: org.elasticsearch.xpack.application.CohereServiceUpgradeIT - method: testRerank {upgradedNodes=1} - issue: https://github.com/elastic/elasticsearch/issues/116973 -- class: org.elasticsearch.xpack.application.CohereServiceUpgradeIT - method: testCohereEmbeddings {upgradedNodes=1} - issue: https://github.com/elastic/elasticsearch/issues/116974 -- class: org.elasticsearch.xpack.application.CohereServiceUpgradeIT - method: testCohereEmbeddings {upgradedNodes=2} - issue: https://github.com/elastic/elasticsearch/issues/116975 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/117027 @@ -388,8 +373,11 @@ tests: - class: "org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT" method: "test {scoring.*}" issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT - method: test {scoring.QstrWithFieldAndScoringSortedEval} +- class: "org.elasticsearch.xpack.esql.qa.mixed.MultiClusterEsqlSpecIT" + method: "test {scoring.*}" + issue: https://github.com/elastic/elasticsearch/issues/118460 +- class: "org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT" + method: "test {scoring.*}" issue: https://github.com/elastic/elasticsearch/issues/117751 - class: org.elasticsearch.search.ccs.CrossClusterIT method: testCancel @@ -414,21 +402,6 @@ tests: - class: org.elasticsearch.upgrades.HealthNodeUpgradeIT method: testHealthNode {upgradedNodes=2} issue: https://github.com/elastic/elasticsearch/issues/118158 -- class: org.elasticsearch.xpack.application.OpenAiServiceUpgradeIT - method: testOpenAiEmbeddings {upgradedNodes=1} - issue: https://github.com/elastic/elasticsearch/issues/118156 -- class: org.elasticsearch.xpack.application.HuggingFaceServiceUpgradeIT - method: testElser {upgradedNodes=1} - issue: https://github.com/elastic/elasticsearch/issues/118127 -- class: org.elasticsearch.xpack.application.OpenAiServiceUpgradeIT - method: testOpenAiCompletions {upgradedNodes=1} - issue: https://github.com/elastic/elasticsearch/issues/118162 -- class: org.elasticsearch.xpack.application.OpenAiServiceUpgradeIT - method: testOpenAiCompletions {upgradedNodes=2} - issue: https://github.com/elastic/elasticsearch/issues/118163 -- class: org.elasticsearch.xpack.application.OpenAiServiceUpgradeIT - method: testOpenAiEmbeddings {upgradedNodes=2} - issue: https://github.com/elastic/elasticsearch/issues/118204 - class: org.elasticsearch.xpack.migrate.action.ReindexDataStreamTransportActionIT method: testNonExistentDataStream issue: https://github.com/elastic/elasticsearch/issues/118275 @@ -438,18 +411,44 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultRerank issue: https://github.com/elastic/elasticsearch/issues/118184 -- class: org.elasticsearch.xpack.application.HuggingFaceServiceUpgradeIT - method: testHFEmbeddings {upgradedNodes=1} - issue: https://github.com/elastic/elasticsearch/issues/118197 - class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT method: testSettingsApplied issue: https://github.com/elastic/elasticsearch/issues/116694 -- class: org.elasticsearch.upgrades.FileSettingsRoleMappingUpgradeIT - method: testRoleMappingsAppliedOnUpgrade {upgradedNodes=3} - issue: https://github.com/elastic/elasticsearch/issues/118311 -- class: 
org.elasticsearch.upgrades.FileSettingsRoleMappingUpgradeIT - method: testRoleMappingsAppliedOnUpgrade {upgradedNodes=1} - issue: https://github.com/elastic/elasticsearch/issues/118309 -- class: org.elasticsearch.upgrades.FileSettingsRoleMappingUpgradeIT - method: testRoleMappingsAppliedOnUpgrade {upgradedNodes=2} - issue: https://github.com/elastic/elasticsearch/issues/118310 +- class: org.elasticsearch.repositories.blobstore.testkit.analyze.MinioRepositoryAnalysisRestIT + issue: https://github.com/elastic/elasticsearch/issues/118548 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/10_reindex/Test Reindex With Existing Data Stream} + issue: https://github.com/elastic/elasticsearch/issues/118575 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/20_reindex_status/Test Reindex With Existing Data Stream} + issue: https://github.com/elastic/elasticsearch/issues/118576 +- class: org.elasticsearch.discovery.ec2.DiscoveryEc2AvailabilityZoneAttributeNoImdsIT + method: testAvailabilityZoneAttribute + issue: https://github.com/elastic/elasticsearch/issues/118564 +- class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests + method: testRetryPointInTime + issue: https://github.com/elastic/elasticsearch/issues/118514 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {stats.ByDateAndKeywordAndIntWithAlias SYNC} + issue: https://github.com/elastic/elasticsearch/issues/118668 +- class: org.elasticsearch.xpack.application.OpenAiServiceUpgradeIT + method: testOpenAiEmbeddings {upgradedNodes=1} + issue: https://github.com/elastic/elasticsearch/issues/118156 +- class: org.elasticsearch.xpack.application.HuggingFaceServiceUpgradeIT + method: testHFEmbeddings {upgradedNodes=1} + issue: https://github.com/elastic/elasticsearch/issues/118197 +- class: org.elasticsearch.xpack.application.OpenAiServiceUpgradeIT + method: testOpenAiCompletions {upgradedNodes=2} + issue: https://github.com/elastic/elasticsearch/issues/118163 +- class: org.elasticsearch.xpack.application.OpenAiServiceUpgradeIT + method: testOpenAiCompletions {upgradedNodes=1} + issue: https://github.com/elastic/elasticsearch/issues/118162 +- class: org.elasticsearch.xpack.application.HuggingFaceServiceUpgradeIT + method: testElser {upgradedNodes=1} + issue: https://github.com/elastic/elasticsearch/issues/118127 +- class: org.elasticsearch.xpack.application.CohereServiceUpgradeIT + method: testCohereEmbeddings {upgradedNodes=1} + issue: https://github.com/elastic/elasticsearch/issues/116974 +- class: org.elasticsearch.index.engine.RecoverySourcePruneMergePolicyTests + method: testPruneSome + issue: https://github.com/elastic/elasticsearch/issues/118728 diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 591c42c0b803c..e8390fc3b1f0f 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -28,6 +28,9 @@ dependencies { api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" api "joda-time:joda-time:2.10.10" + + javaRestTestImplementation project(':plugins:discovery-ec2') + javaRestTestImplementation project(':test:fixtures:ec2-imds-fixture') } tasks.named("dependencyLicenses").configure { diff --git a/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeImdsV1IT.java 
b/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeImdsV1IT.java new file mode 100644 index 0000000000000..32291236ea158 --- /dev/null +++ b/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeImdsV1IT.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.discovery.ec2; + +import fixture.aws.imds.Ec2ImdsHttpFixture; +import fixture.aws.imds.Ec2ImdsServiceBuilder; +import fixture.aws.imds.Ec2ImdsVersion; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +public class DiscoveryEc2AvailabilityZoneAttributeImdsV1IT extends DiscoveryEc2AvailabilityZoneAttributeTestCase { + private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( + new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).availabilityZoneSupplier( + DiscoveryEc2AvailabilityZoneAttributeTestCase::getAvailabilityZone + ) + ); + + public static ElasticsearchCluster cluster = buildCluster(ec2ImdsHttpFixture::getAddress); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(ec2ImdsHttpFixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeImdsV2IT.java b/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeImdsV2IT.java new file mode 100644 index 0000000000000..8b785d688e7c4 --- /dev/null +++ b/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeImdsV2IT.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.discovery.ec2; + +import fixture.aws.imds.Ec2ImdsHttpFixture; +import fixture.aws.imds.Ec2ImdsServiceBuilder; +import fixture.aws.imds.Ec2ImdsVersion; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +public class DiscoveryEc2AvailabilityZoneAttributeImdsV2IT extends DiscoveryEc2AvailabilityZoneAttributeTestCase { + private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( + new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V2).availabilityZoneSupplier( + DiscoveryEc2AvailabilityZoneAttributeTestCase::getAvailabilityZone + ) + ); + + public static ElasticsearchCluster cluster = buildCluster(ec2ImdsHttpFixture::getAddress); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(ec2ImdsHttpFixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeNoImdsIT.java b/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeNoImdsIT.java new file mode 100644 index 0000000000000..73213090b6f93 --- /dev/null +++ b/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeNoImdsIT.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.discovery.ec2; + +import com.amazonaws.util.EC2MetadataUtils; + +import org.elasticsearch.client.Request; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.ClassRule; + +import java.io.IOException; + +public class DiscoveryEc2AvailabilityZoneAttributeNoImdsIT extends ESRestTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .plugin("discovery-ec2") + .setting(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), "true") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testAvailabilityZoneAttribute() throws IOException { + assumeTrue("test only in non-AWS environment", EC2MetadataUtils.getInstanceId() == null); + + final var nodesInfoResponse = assertOKAndCreateObjectPath(client().performRequest(new Request("GET", "/_nodes/_all/_none"))); + for (final var nodeId : nodesInfoResponse.evaluateMapKeys("nodes")) { + assertNull(nodesInfoResponse.evaluateExact("nodes", nodeId, "attributes", "aws_availability_zone")); + } + } +} diff --git a/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeTestCase.java b/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeTestCase.java new file mode 100644 index 0000000000000..7eb18eec5c0b9 --- /dev/null +++ b/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeTestCase.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.discovery.ec2; + +import org.elasticsearch.client.Request; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.Objects; +import java.util.Set; +import java.util.function.Supplier; + +public abstract class DiscoveryEc2AvailabilityZoneAttributeTestCase extends ESRestTestCase { + + private static final Set<String> createdAvailabilityZones = ConcurrentCollections.newConcurrentSet(); + + protected static String getAvailabilityZone() { + final var zoneName = randomIdentifier(); + createdAvailabilityZones.add(zoneName); + return zoneName; + } + + protected static ElasticsearchCluster buildCluster(Supplier<String> imdsFixtureAddressSupplier) { + return ElasticsearchCluster.local() + .plugin("discovery-ec2") + .setting(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), "true") + .systemProperty("com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", imdsFixtureAddressSupplier) + .build(); + } + + public void testAvailabilityZoneAttribute() throws IOException { + final var nodesInfoResponse = assertOKAndCreateObjectPath(client().performRequest(new Request("GET", "/_nodes/_all/_none"))); + for (final var nodeId : nodesInfoResponse.evaluateMapKeys("nodes")) { + assertThat( + createdAvailabilityZones, + Matchers.hasItem( + Objects.requireNonNull(nodesInfoResponse.evaluateExact("nodes", nodeId, "attributes", "aws_availability_zone")) + ) + ); + } + } +} diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java index bd291dd14aeb8..d4414f580887a 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java @@ -16,8 +16,9 @@ import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; -import org.junit.Before; +import org.elasticsearch.test.junit.RunnableTestRuleAdapter; import org.junit.ClassRule; import org.junit.rules.RuleChain; import org.junit.rules.TemporaryFolder; @@ -70,9 +71,15 @@ public String get() { .setting("xpack.security.authc.anonymous.roles", "superuser") .configFile("operator/settings.json", Resource.fromString(SETTING_JSON)) .build(); + private static final RunnableTestRuleAdapter versionLimit = new RunnableTestRuleAdapter( + () -> assumeTrue( + "Only relevant when upgrading from a version before role mappings were stored in cluster state", + getOldClusterTestVersion().after(new Version(8, 7, 0)) && getOldClusterTestVersion().before(new Version(8, 15, 0)) + ) + ); @ClassRule - public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster); + public static TestRule ruleChain = RuleChain.outerRule(versionLimit).around(repoDirectory).around(cluster); public FileSettingsRoleMappingUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { super(upgradedNodes); @@ -83,14 +90,6 @@ protected ElasticsearchCluster getUpgradeCluster() { return cluster; } - @Before - public void 
checkVersions() { - assumeTrue( - "Only relevant when upgrading from a version before role mappings were stored in cluster state", - oldClusterHasFeature("gte_v8.7.0") && oldClusterHasFeature("gte_v8.15.0") == false - ); - } - private static void waitForSecurityMigrationCompletionIfIndexExists() throws Exception { final Request request = new Request("GET", "_cluster/state/metadata/.security-7"); assertBusy(() -> { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.cancel_reindex.json b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.cancel_reindex.json new file mode 100644 index 0000000000000..a034f204edbfb --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.cancel_reindex.json @@ -0,0 +1,30 @@ +{ + "migrate.cancel_reindex":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html", + "description":"This API cancels a migration reindex attempt for a data stream or index" + }, + "stability":"experimental", + "visibility":"private", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_migration/reindex/{index}/_cancel", + "methods":[ + "POST" + ], + "parts":{ + "index":{ + "type":"string", + "description":"The index or data stream name" + } + } + } + ] + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java index c56bc201e7f86..a72ff4f514115 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -556,11 +556,7 @@ public void testUpdateSettings() { } public void testSearchQueryThenFetch() throws Exception { - interceptTransportActions( - SearchTransportService.QUERY_ACTION_NAME, - SearchTransportService.FETCH_ID_ACTION_NAME, - SearchTransportService.FREE_CONTEXT_ACTION_NAME - ); + interceptTransportActions(SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.FETCH_ID_ACTION_NAME); String[] randomIndicesOrAliases = randomIndicesOrAliases(); for (int i = 0; i < randomIndicesOrAliases.length; i++) { @@ -580,16 +576,13 @@ public void testSearchQueryThenFetch() throws Exception { SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.FETCH_ID_ACTION_NAME ); - // free context messages are not necessarily sent, but if they are, check their indices - assertIndicesSubsetOptionalRequests(Arrays.asList(searchRequest.indices()), SearchTransportService.FREE_CONTEXT_ACTION_NAME); } public void testSearchDfsQueryThenFetch() throws Exception { interceptTransportActions( SearchTransportService.DFS_ACTION_NAME, SearchTransportService.QUERY_ID_ACTION_NAME, - SearchTransportService.FETCH_ID_ACTION_NAME, - SearchTransportService.FREE_CONTEXT_ACTION_NAME + SearchTransportService.FETCH_ID_ACTION_NAME ); String[] randomIndicesOrAliases = randomIndicesOrAliases(); @@ -611,8 +604,6 @@ public void testSearchDfsQueryThenFetch() throws Exception { SearchTransportService.QUERY_ID_ACTION_NAME, SearchTransportService.FETCH_ID_ACTION_NAME ); - // free context messages are not necessarily sent, but if they are, check their indices - assertIndicesSubsetOptionalRequests(Arrays.asList(searchRequest.indices()), SearchTransportService.FREE_CONTEXT_ACTION_NAME); } private static void assertSameIndices(IndicesRequest originalRequest, 
String... actions) { diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 7092d7f30f1b9..5f23316b31988 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -474,5 +474,5 @@ exports org.elasticsearch.lucene.spatial; exports org.elasticsearch.inference.configuration; exports org.elasticsearch.monitor.metrics; - + exports org.elasticsearch.plugins.internal.rewriter to org.elasticsearch.inference; } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 7151791d0519a..e37a83a26fefa 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -144,6 +144,8 @@ static TransportVersion def(int id) { public static final TransportVersion RETRIES_AND_OPERATIONS_IN_BLOBSTORE_STATS = def(8_804_00_0); public static final TransportVersion ADD_DATA_STREAM_OPTIONS_TO_TEMPLATES = def(8_805_00_0); public static final TransportVersion KNN_QUERY_RESCORE_OVERSAMPLE = def(8_806_00_0); + public static final TransportVersion SEMANTIC_QUERY_LENIENT = def(8_807_00_0); + public static final TransportVersion ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS = def(8_808_00_0); /* * STOP! READ THIS FIRST! No, really, @@ -208,7 +210,7 @@ static TransportVersion def(int id) { * Reference to the minimum transport version that can be used with CCS. * This should be the transport version used by the previous minor release. */ - public static final TransportVersion MINIMUM_CCS_VERSION = V_8_15_2; + public static final TransportVersion MINIMUM_CCS_VERSION = SOURCE_MODE_TELEMETRY_FIX_8_17; static final NavigableMap<Integer, TransportVersion> VERSION_IDS = getAllVersionIds(TransportVersions.class); diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 9ae0636752b98..cc7a28f5ee078 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -190,11 +190,11 @@ public class Version implements VersionId<Version>, ToXContentFragment { public static final Version V_8_15_3 = new Version(8_15_03_99); public static final Version V_8_15_4 = new Version(8_15_04_99); public static final Version V_8_15_5 = new Version(8_15_05_99); - public static final Version V_8_15_6 = new Version(8_15_06_99); public static final Version V_8_16_0 = new Version(8_16_00_99); public static final Version V_8_16_1 = new Version(8_16_01_99); public static final Version V_8_16_2 = new Version(8_16_02_99); public static final Version V_8_17_0 = new Version(8_17_00_99); + public static final Version V_8_17_1 = new Version(8_17_01_99); public static final Version V_8_18_0 = new Version(8_18_00_99); public static final Version CURRENT = V_8_18_0; diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 96aa459fd998a..219e4a78ace46 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -736,7 +736,7 @@ private void raisePhaseFailure(SearchPhaseExecutionException exception) { try { SearchShardTarget searchShardTarget = entry.getSearchShardTarget(); Transport.Connection connection = getConnection(searchShardTarget.getClusterAlias(), 
searchShardTarget.getNodeId()); - sendReleaseSearchContext(entry.getContextId(), connection, getOriginalIndices(entry.getShardIndex())); + sendReleaseSearchContext(entry.getContextId(), connection); } catch (Exception inner) { inner.addSuppressed(exception); logger.trace("failed to release context", inner); @@ -752,10 +752,10 @@ private void raisePhaseFailure(SearchPhaseExecutionException exception) { * @see org.elasticsearch.search.fetch.FetchSearchResult#getContextId() * */ - void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connection connection, OriginalIndices originalIndices) { + void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connection connection) { assert isPartOfPointInTime(contextId) == false : "Must not release point in time context [" + contextId + "]"; if (connection != null) { - searchTransportService.sendFreeContext(connection, contextId, originalIndices); + searchTransportService.sendFreeContext(connection, contextId, ActionListener.noop()); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 285dd0a22fd7e..cc8c4becea9a9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -119,11 +119,7 @@ public void onFailure(Exception exception) { // the query might not have been executed at all (for example because thread pool rejected // execution) and the search context that was created in the dfs phase might not be released. // release it again to be on the safe side - context.sendReleaseSearchContext( - querySearchRequest.contextId(), - connection, - context.getOriginalIndices(shardIndex) - ); + context.sendReleaseSearchContext(querySearchRequest.contextId(), connection); } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java index bdb48646f3fb2..cc92eaca21ca7 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java @@ -92,11 +92,7 @@ protected static void releaseIrrelevantSearchContext(SearchPhaseResult searchPha context.getLogger().trace("trying to release search context [{}]", phaseResult.getContextId()); SearchShardTarget shardTarget = phaseResult.getSearchShardTarget(); Transport.Connection connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); - context.sendReleaseSearchContext( - phaseResult.getContextId(), - connection, - context.getOriginalIndices(phaseResult.getShardIndex()) - ); + context.sendReleaseSearchContext(phaseResult.getContextId(), connection); } catch (Exception e) { context.getLogger().trace("failed to release context", e); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 8444a92b24432..cfc2e1bcdaf2b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -13,12 +13,10 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.IndicesRequest; import 
org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction; import org.elasticsearch.action.support.ChannelActionListener; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -124,24 +122,6 @@ public SearchTransportService( this.responseWrapper = responseWrapper; } - private static final ActionListenerResponseHandler SEND_FREE_CONTEXT_LISTENER = - new ActionListenerResponseHandler<>( - ActionListener.noop(), - SearchFreeContextResponse::readFrom, - TransportResponseHandler.TRANSPORT_WORKER - ); - - public void sendFreeContext(Transport.Connection connection, final ShardSearchContextId contextId, OriginalIndices originalIndices) { - transportService.sendRequest( - connection, - FREE_CONTEXT_ACTION_NAME, - new SearchFreeContextRequest(originalIndices, contextId), - TransportRequestOptions.EMPTY, - // no need to respond if it was freed or not - SEND_FREE_CONTEXT_LISTENER - ); - } - public void sendFreeContext( Transport.Connection connection, ShardSearchContextId contextId, @@ -370,43 +350,6 @@ private static class ClearScrollContextsRequest extends TransportRequest { } } - static class SearchFreeContextRequest extends ScrollFreeContextRequest implements IndicesRequest { - private final OriginalIndices originalIndices; - - SearchFreeContextRequest(OriginalIndices originalIndices, ShardSearchContextId id) { - super(id); - this.originalIndices = originalIndices; - } - - SearchFreeContextRequest(StreamInput in) throws IOException { - super(in); - originalIndices = OriginalIndices.readOriginalIndices(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - OriginalIndices.writeOriginalIndices(originalIndices, out); - } - - @Override - public String[] indices() { - if (originalIndices == null) { - return null; - } - return originalIndices.indices(); - } - - @Override - public IndicesOptions indicesOptions() { - if (originalIndices == null) { - return null; - } - return originalIndices.indicesOptions(); - } - - } - public static class SearchFreeContextResponse extends TransportResponse { private static final SearchFreeContextResponse FREED = new SearchFreeContextResponse(true); @@ -456,12 +399,13 @@ public static void registerRequestHandler(TransportService transportService, Sea SearchFreeContextResponse::readFrom ); - transportService.registerRequestHandler( - FREE_CONTEXT_ACTION_NAME, - freeContextExecutor, - SearchFreeContextRequest::new, - freeContextHandler - ); + // TODO: remove this handler once the lowest compatible version stops using it + transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, freeContextExecutor, in -> { + var res = new ScrollFreeContextRequest(in); + // this handler exists for BwC purposes only, we don't need the original indices to free the context + OriginalIndices.readOriginalIndices(in); + return res; + }, freeContextHandler); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, false, SearchFreeContextResponse::readFrom); transportService.registerRequestHandler( diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 27cbb39c05d38..b7081fd1688a5 
100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -30,7 +30,6 @@ import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.Tuple; import org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexVersion; @@ -56,6 +55,7 @@ import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; import static org.elasticsearch.bootstrap.BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING; import static org.elasticsearch.nativeaccess.WindowsFunctions.ConsoleCtrlHandler.CTRL_CLOSE_EVENT; @@ -209,10 +209,14 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { if (Boolean.parseBoolean(System.getProperty("es.entitlements.enabled"))) { LogManager.getLogger(Elasticsearch.class).info("Bootstrapping Entitlements"); - List<Tuple<Path, Boolean>> pluginData = pluginsLoader.allBundles() - .stream() - .map(bundle -> Tuple.tuple(bundle.getDir(), bundle.pluginDescriptor().isModular())) - .toList(); + List<EntitlementBootstrap.PluginData> pluginData = Stream.concat( + pluginsLoader.moduleBundles() + .stream() + .map(bundle -> new EntitlementBootstrap.PluginData(bundle.getDir(), bundle.pluginDescriptor().isModular(), false)), + pluginsLoader.pluginBundles() + .stream() + .map(bundle -> new EntitlementBootstrap.PluginData(bundle.getDir(), bundle.pluginDescriptor().isModular(), true)) + ).toList(); EntitlementBootstrap.bootstrap(pluginData, pluginsResolver::resolveClassToPluginName); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index e7914d812e05c..2ce91b66fa789 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -66,6 +66,7 @@ * Note: This class is performance sensitive, so we pay extra attention to the data structure usage and we avoid streams and iterators * when possible in favor of the classic for-i loops. 
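* (The @SuppressWarnings annotation added just below simply makes this deliberate convention explicit to IDE inspections.)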
*/ +@SuppressWarnings("ForLoopReplaceableByForEach") public class IndexNameExpressionResolver { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IndexNameExpressionResolver.class); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index c1ba2bd710931..2a7209a2e5e96 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -1656,23 +1656,11 @@ static void prepareResizeIndexSettings( throw new IllegalStateException("unknown resize type is " + type); } - final Settings.Builder builder = Settings.builder(); + final Settings.Builder builder; if (copySettings) { - // copy all settings and non-copyable settings and settings that have already been set (e.g., from the request) - for (final String key : sourceMetadata.getSettings().keySet()) { - final Setting<?> setting = indexScopedSettings.get(key); - if (setting == null) { - assert indexScopedSettings.isPrivateSetting(key) : key; - } else if (setting.getProperties().contains(Setting.Property.NotCopyableOnResize)) { - continue; - } - // do not override settings that have already been set (for example, from the request) - if (indexSettingsBuilder.keys().contains(key)) { - continue; - } - builder.copy(key, sourceMetadata.getSettings()); - } + builder = copySettingsFromSource(true, sourceMetadata.getSettings(), indexScopedSettings, indexSettingsBuilder); } else { + builder = Settings.builder(); final Predicate<String> sourceSettingsPredicate = (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort.") @@ -1690,6 +1678,36 @@ static void prepareResizeIndexSettings( } } + public static Settings.Builder copySettingsFromSource( + boolean copyPrivateSettings, + Settings sourceSettings, + IndexScopedSettings indexScopedSettings, + Settings.Builder indexSettingsBuilder + ) { + final Settings.Builder builder = Settings.builder(); + for (final String key : sourceSettings.keySet()) { + final Setting<?> setting = indexScopedSettings.get(key); + if (setting == null) { + assert indexScopedSettings.isPrivateSetting(key) : key; + if (copyPrivateSettings == false) { + continue; + } + } else if (setting.getProperties().contains(Setting.Property.NotCopyableOnResize)) { + continue; + } else if (setting.isPrivateIndex()) { + if (copyPrivateSettings == false) { + continue; + } + } + // do not override settings that have already been set (for example, from the request) + if (indexSettingsBuilder.keys().contains(key)) { + continue; + } + builder.copy(key, sourceSettings); + } + return builder; + } + /** * Returns a default number of routing shards based on the number of shards of the index. The default number of routing shards will * allow any index to be split at least once and at most 10 times by a factor of two. 
The closer the number of shards gets to 1024 diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index aee60c3eda57f..39acc6d3f6311 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DocumentMapper; @@ -250,9 +251,11 @@ static void prepareBackingIndex( DataStreamFailureStoreDefinition.applyFailureStoreSettings(nodeSettings, settingsUpdate); } - imb.settings(settingsUpdate.build()) - .settingsVersion(im.getSettingsVersion() + 1) - .mappingVersion(im.getMappingVersion() + 1) + Settings maybeUpdatedSettings = settingsUpdate.build(); + if (IndexSettings.same(im.getSettings(), maybeUpdatedSettings) == false) { + imb.settings(maybeUpdatedSettings).settingsVersion(im.getSettingsVersion() + 1); + } + imb.mappingVersion(im.getMappingVersion() + 1) .mappingsUpdatedVersion(IndexVersion.current()) .putMapping(new MappingMetadata(mapper)); b.put(imb); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java index 2e78cc6f516b1..6a5aa2943de92 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.xcontent.ToXContent; +import java.util.Collections; import java.util.Iterator; public enum ChunkedToXContentHelper { @@ -53,6 +54,14 @@ public static Iterator<ToXContent> field(String name, String value) { return Iterators.single(((builder, params) -> builder.field(name, value))); } + public static Iterator<ToXContent> optionalField(String name, String value) { + if (value == null) { + return Collections.emptyIterator(); + } else { + return field(name, value); + } + } + /** * Creates an Iterator of a single ToXContent object that serializes the given object as a single chunk. Just wraps {@link * Iterators#single}, but still useful because it avoids any type ambiguity. 
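A quick sketch of how the new optionalField helper composes with the existing chunked-field iterators. The "description" field and the name/desc variables are made-up illustrations, not part of this change:

    // Build a chunked response fragment; desc may legitimately be null.
    Iterator<ToXContent> chunks = Iterators.concat(
        ChunkedToXContentHelper.startObject(),
        ChunkedToXContentHelper.field("name", name),                 // always emits one chunk
        ChunkedToXContentHelper.optionalField("description", desc),  // emits nothing when desc == null
        ChunkedToXContentHelper.endObject()
    );

With the helper, a nullable field no longer needs an explicit null check around the iterator concatenation at the call site.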
diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index ce7eb9b911fb3..35d83586ce177 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -59,6 +59,7 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.threadpool.ThreadPool; @@ -483,7 +484,8 @@ public IndexService newIndexService( IdFieldMapper idFieldMapper, ValuesSourceRegistry valuesSourceRegistry, IndexStorePlugin.IndexFoldersDeletionListener indexFoldersDeletionListener, - Map snapshotCommitSuppliers + Map snapshotCommitSuppliers, + QueryRewriteInterceptor queryRewriteInterceptor ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper @@ -545,7 +547,8 @@ public IndexService newIndexService( indexFoldersDeletionListener, snapshotCommitSupplier, indexCommitListener.get(), - mapperMetrics + mapperMetrics, + queryRewriteInterceptor ); success = true; return indexService; diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 571bbd76a49dd..a5b3991d89bc4 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -85,6 +85,7 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.threadpool.ThreadPool; @@ -162,6 +163,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final Supplier indexSortSupplier; private final ValuesSourceRegistry valuesSourceRegistry; private final MapperMetrics mapperMetrics; + private final QueryRewriteInterceptor queryRewriteInterceptor; @SuppressWarnings("this-escape") public IndexService( @@ -196,7 +198,8 @@ public IndexService( IndexStorePlugin.IndexFoldersDeletionListener indexFoldersDeletionListener, IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier, Engine.IndexCommitListener indexCommitListener, - MapperMetrics mapperMetrics + MapperMetrics mapperMetrics, + QueryRewriteInterceptor queryRewriteInterceptor ) { super(indexSettings); assert indexCreationContext != IndexCreationContext.RELOAD_ANALYZERS @@ -271,6 +274,7 @@ public IndexService( this.indexingOperationListeners = Collections.unmodifiableList(indexingOperationListeners); this.indexCommitListener = indexCommitListener; this.mapperMetrics = mapperMetrics; + this.queryRewriteInterceptor = queryRewriteInterceptor; try (var ignored = threadPool.getThreadContext().clearTraceContext()) { // kick off async ops for the first shard in this index this.refreshTask = new AsyncRefreshTask(this); @@ -802,6 +806,7 @@ public QueryRewriteContext newQueryRewriteContext( allowExpensiveQueries, 
scriptService, null, + null, null ); } diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index cc565be67f2c6..6a1eb07062f18 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -123,6 +123,7 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID_BACKPORT = def(8_520_00_0, Version.LUCENE_9_12_0); public static final IndexVersion DEPRECATE_SOURCE_MODE_MAPPER = def(8_521_00_0, Version.LUCENE_9_12_0); public static final IndexVersion USE_SYNTHETIC_SOURCE_FOR_RECOVERY_BACKPORT = def(8_522_00_0, Version.LUCENE_9_12_0); + public static final IndexVersion UPGRADE_TO_LUCENE_9_12_1 = def(8_523_00_0, Version.LUCENE_9_12_1); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index f00e6904feac7..05262798bac2a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.SuggestingErrorOnUnknown; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.FilterXContentParser; import org.elasticsearch.xcontent.FilterXContentParserWrapper; @@ -278,6 +279,14 @@ protected static List readQueries(StreamInput in) throws IOExcepti @Override public final QueryBuilder rewrite(QueryRewriteContext queryRewriteContext) throws IOException { + QueryRewriteInterceptor queryRewriteInterceptor = queryRewriteContext.getQueryRewriteInterceptor(); + if (queryRewriteInterceptor != null) { + var rewritten = queryRewriteInterceptor.interceptAndRewrite(queryRewriteContext, this); + if (rewritten != this) { + return new InterceptedQueryBuilderWrapper(rewritten); + } + } + QueryBuilder rewritten = doRewrite(queryRewriteContext); if (rewritten == this) { return rewritten; diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index b0d3065ba3a3f..e166731d47057 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -104,6 +104,7 @@ public CoordinatorRewriteContext( null, null, null, + null, null ); this.dateFieldRangeInfo = dateFieldRangeInfo; diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java index aacb4b4129c73..31bc7dddacb7f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java @@ -66,6 +66,9 @@ public InnerHitBuilder innerHitBuilder() { public static void extractInnerHits(QueryBuilder query, Map innerHitBuilders) { if (query instanceof AbstractQueryBuilder) { 
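// Typical case: a concrete AbstractQueryBuilder extracts its own inner hit builders.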
((AbstractQueryBuilder) query).extractInnerHitBuilders(innerHitBuilders); + } else if (query instanceof InterceptedQueryBuilderWrapper interceptedQuery) { + // Unwrap an intercepted query here + extractInnerHits(interceptedQuery.queryBuilder, innerHitBuilders); } else { throw new IllegalStateException( "provided query builder [" + query.getClass() + "] class should inherit from AbstractQueryBuilder, but it doesn't" diff --git a/server/src/main/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapper.java b/server/src/main/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapper.java new file mode 100644 index 0000000000000..b1030e4a76d97 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapper.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.Query; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Wrapper for instances of {@link QueryBuilder} that have been intercepted using the {@link QueryRewriteInterceptor} to + * break out of the rewrite phase. These instances are unwrapped on serialization. + */ +class InterceptedQueryBuilderWrapper implements QueryBuilder { + + protected final QueryBuilder queryBuilder; + + InterceptedQueryBuilderWrapper(QueryBuilder queryBuilder) { + super(); + this.queryBuilder = queryBuilder; + } + + @Override + public QueryBuilder rewrite(QueryRewriteContext queryRewriteContext) throws IOException { + QueryRewriteInterceptor queryRewriteInterceptor = queryRewriteContext.getQueryRewriteInterceptor(); + try { + queryRewriteContext.setQueryRewriteInterceptor(null); + QueryBuilder rewritten = queryBuilder.rewrite(queryRewriteContext); + return rewritten != queryBuilder ? 
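/* if the inner builder rewrote itself, re-wrap it so it stays marked as intercepted; otherwise return this so the rewrite loop can detect a fixed point */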
new InterceptedQueryBuilderWrapper(rewritten) : this; + } finally { + queryRewriteContext.setQueryRewriteInterceptor(queryRewriteInterceptor); + } + } + + @Override + public String getWriteableName() { + return queryBuilder.getWriteableName(); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return queryBuilder.getMinimalSupportedVersion(); + } + + @Override + public Query toQuery(SearchExecutionContext context) throws IOException { + return queryBuilder.toQuery(context); + } + + @Override + public QueryBuilder queryName(String queryName) { + queryBuilder.queryName(queryName); + return this; + } + + @Override + public String queryName() { + return queryBuilder.queryName(); + } + + @Override + public float boost() { + return queryBuilder.boost(); + } + + @Override + public QueryBuilder boost(float boost) { + queryBuilder.boost(boost); + return this; + } + + @Override + public String getName() { + return queryBuilder.getName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + queryBuilder.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return queryBuilder.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o instanceof InterceptedQueryBuilderWrapper == false) return false; + return Objects.equals(queryBuilder, ((InterceptedQueryBuilderWrapper) o).queryBuilder); + } + + @Override + public int hashCode() { + return Objects.hashCode(queryBuilder); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index fce74aa60ab16..265a0c52593bd 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.search.builder.PointInTimeBuilder; @@ -70,6 +71,7 @@ public class QueryRewriteContext { protected Predicate allowedFields; private final ResolvedIndices resolvedIndices; private final PointInTimeBuilder pit; + private QueryRewriteInterceptor queryRewriteInterceptor; public QueryRewriteContext( final XContentParserConfiguration parserConfiguration, @@ -86,7 +88,8 @@ public QueryRewriteContext( final BooleanSupplier allowExpensiveQueries, final ScriptCompiler scriptService, final ResolvedIndices resolvedIndices, - final PointInTimeBuilder pit + final PointInTimeBuilder pit, + final QueryRewriteInterceptor queryRewriteInterceptor ) { this.parserConfiguration = parserConfiguration; @@ -105,6 +108,7 @@ public QueryRewriteContext( this.scriptService = scriptService; this.resolvedIndices = resolvedIndices; this.pit = pit; + this.queryRewriteInterceptor = queryRewriteInterceptor; } public QueryRewriteContext(final XContentParserConfiguration parserConfiguration, final Client client, final LongSupplier nowInMillis) { @@ -123,6 +127,7 @@ public QueryRewriteContext(final XContentParserConfiguration parserConfiguration null, null, null, + null, null ); } @@ -132,7 +137,8 @@ public 
QueryRewriteContext( final Client client, final LongSupplier nowInMillis, final ResolvedIndices resolvedIndices, - final PointInTimeBuilder pit + final PointInTimeBuilder pit, + final QueryRewriteInterceptor queryRewriteInterceptor ) { this( parserConfiguration, @@ -149,7 +155,8 @@ public QueryRewriteContext( null, null, resolvedIndices, - pit + pit, + queryRewriteInterceptor ); } @@ -428,4 +435,13 @@ public String getTierPreference() { // It was decided we should only test the first of these potentially multiple preferences. return value.split(",")[0].trim(); } + + public QueryRewriteInterceptor getQueryRewriteInterceptor() { + return queryRewriteInterceptor; + } + + public void setQueryRewriteInterceptor(QueryRewriteInterceptor queryRewriteInterceptor) { + this.queryRewriteInterceptor = queryRewriteInterceptor; + } + } diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index fbc3696d40221..b2ee6134a7728 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -271,6 +271,7 @@ private SearchExecutionContext( allowExpensiveQueries, scriptService, null, + null, null ); this.shardId = shardId; diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java index 1edd69a6443a7..2486cc66fd4c9 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java @@ -171,6 +171,10 @@ public void reset() throws IOException { if (markedSlice < 0 || markedSliceOffset < 0) { throw new IOException("Mark has not been set"); } + if (initialized && nextSlice == markedSlice + 1 && currentSliceOffset == markedSliceOffset) { + // Reset at the marked offset should return immediately without re-opening the slice + return; + } nextSlice = markedSlice; initialized = true; diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 87488de1a0e6a..e6d8290286a78 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -137,6 +137,7 @@ import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; @@ -265,6 +266,7 @@ public class IndicesService extends AbstractLifecycleComponent private final CheckedBiConsumer requestCacheKeyDifferentiator; private final MapperMetrics mapperMetrics; private final List searchOperationListeners; + private final QueryRewriteInterceptor queryRewriteInterceptor; @Override protected void doStart() { @@ -333,6 +335,7 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.indexFoldersDeletionListeners = new CompositeIndexFoldersDeletionListener(builder.indexFoldersDeletionListeners); this.snapshotCommitSuppliers = 
builder.snapshotCommitSuppliers; this.requestCacheKeyDifferentiator = builder.requestCacheKeyDifferentiator; + this.queryRewriteInterceptor = builder.queryRewriteInterceptor; this.mapperMetrics = builder.mapperMetrics; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. In order to @@ -781,7 +784,8 @@ private synchronized IndexService createIndexService( idFieldMappers.apply(idxSettings.getMode()), valuesSourceRegistry, indexFoldersDeletionListeners, - snapshotCommitSuppliers + snapshotCommitSuppliers, + queryRewriteInterceptor ); } @@ -1764,7 +1768,7 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, Set requestCacheKeyDifferentiator; MapperMetrics mapperMetrics; List searchOperationListener = List.of(); + QueryRewriteInterceptor queryRewriteInterceptor = null; public IndicesServiceBuilder settings(Settings settings) { this.settings = settings; @@ -239,6 +242,27 @@ public IndicesService build() { .flatMap(m -> m.entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + var queryRewriteInterceptors = pluginsService.filterPlugins(SearchPlugin.class) + .map(SearchPlugin::getQueryRewriteInterceptors) + .flatMap(List::stream) + .collect(Collectors.toMap(QueryRewriteInterceptor::getQueryName, interceptor -> { + if (interceptor.getQueryName() == null) { + throw new IllegalArgumentException("QueryRewriteInterceptor [" + interceptor.getClass().getName() + "] requires name"); + } + return interceptor; + }, (a, b) -> { + throw new IllegalStateException( + "Conflicting rewrite interceptors [" + + a.getQueryName() + + "] found in [" + + a.getClass().getName() + + "] and [" + + b.getClass().getName() + + "]" + ); + })); + queryRewriteInterceptor = QueryRewriteInterceptor.multi(queryRewriteInterceptors); + return new IndicesService(this); } } diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index 4497254aad1f0..c2d690d8160ac 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -112,6 +112,23 @@ void infer( ); /** + * Perform completion inference on the model using the unified schema. + * + * @param model The model + * @param request Parameters for the request + * @param timeout The timeout for the request + * @param listener Inference result listener + */ + void unifiedCompletionInfer( + Model model, + UnifiedCompletionRequest request, + TimeValue timeout, + ActionListener listener + ); + + /** + * Chunk long text. 
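+ * (How the input is split into chunks is left to the individual service implementation.)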
+ * * @param model The model + * @param query Inference query, mainly for re-ranking + * @param input Inference input diff --git a/server/src/main/java/org/elasticsearch/inference/TaskType.java b/server/src/main/java/org/elasticsearch/inference/TaskType.java index b0e5bababbbc0..fcb8ea7213795 100644 --- a/server/src/main/java/org/elasticsearch/inference/TaskType.java +++ b/server/src/main/java/org/elasticsearch/inference/TaskType.java @@ -38,6 +38,10 @@ public static TaskType fromString(String name) { } public static TaskType fromStringOrStatusException(String name) { + if (name == null) { + throw new ElasticsearchStatusException("Task type must not be null", RestStatus.BAD_REQUEST); + } + try { TaskType taskType = TaskType.fromString(name); return Objects.requireNonNull(taskType); diff --git a/server/src/main/java/org/elasticsearch/inference/UnifiedCompletionRequest.java b/server/src/main/java/org/elasticsearch/inference/UnifiedCompletionRequest.java new file mode 100644 index 0000000000000..e596be626b518 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/inference/UnifiedCompletionRequest.java @@ -0,0 +1,425 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.inference; + +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParseException; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public record UnifiedCompletionRequest( + List<Message> messages, + @Nullable String model, + @Nullable Long maxCompletionTokens, + @Nullable List<String> stop, + @Nullable Float temperature, + @Nullable ToolChoice toolChoice, + @Nullable List<Tool> tools, + @Nullable Float topP +) implements Writeable { + + public sealed interface Content extends NamedWriteable permits ContentObjects, ContentString {} + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<UnifiedCompletionRequest, Void> PARSER = new ConstructingObjectParser<>( + UnifiedCompletionRequest.class.getSimpleName(), + args -> new UnifiedCompletionRequest( + (List<Message>) args[0], + (String) args[1], + (Long) args[2], + (List<String>) args[3], + (Float) args[4], + (ToolChoice) args[5], + (List<Tool>) args[6], + (Float) args[7] + ) + ); + + static { + PARSER.declareObjectArray(constructorArg(), Message.PARSER::apply, new ParseField("messages")); + PARSER.declareString(optionalConstructorArg(), new ParseField("model")); + 
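// Illustrative request this parser accepts (all field values are made-up examples): + // { "messages": [ { "content": "Hello", "role": "user" } ], "model": "my-model", "temperature": 0.7, "top_p": 0.9 } +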
PARSER.declareLong(optionalConstructorArg(), new ParseField("max_completion_tokens")); + PARSER.declareStringArray(optionalConstructorArg(), new ParseField("stop")); + PARSER.declareFloat(optionalConstructorArg(), new ParseField("temperature")); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> parseToolChoice(p), + new ParseField("tool_choice"), + ObjectParser.ValueType.OBJECT_OR_STRING + ); + PARSER.declareObjectArray(optionalConstructorArg(), Tool.PARSER::apply, new ParseField("tools")); + PARSER.declareFloat(optionalConstructorArg(), new ParseField("top_p")); + } + + public static List<NamedWriteableRegistry.Entry> getNamedWriteables() { + return List.of( + new NamedWriteableRegistry.Entry(Content.class, ContentObjects.NAME, ContentObjects::new), + new NamedWriteableRegistry.Entry(Content.class, ContentString.NAME, ContentString::new), + new NamedWriteableRegistry.Entry(ToolChoice.class, ToolChoiceObject.NAME, ToolChoiceObject::new), + new NamedWriteableRegistry.Entry(ToolChoice.class, ToolChoiceString.NAME, ToolChoiceString::new) + ); + } + + public static UnifiedCompletionRequest of(List<Message> messages) { + return new UnifiedCompletionRequest(messages, null, null, null, null, null, null, null); + } + + public UnifiedCompletionRequest(StreamInput in) throws IOException { + this( + in.readCollectionAsImmutableList(Message::new), + in.readOptionalString(), + in.readOptionalVLong(), + in.readOptionalStringCollectionAsList(), + in.readOptionalFloat(), + in.readOptionalNamedWriteable(ToolChoice.class), + in.readOptionalCollectionAsList(Tool::new), + in.readOptionalFloat() + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(messages); + out.writeOptionalString(model); + out.writeOptionalVLong(maxCompletionTokens); + out.writeOptionalStringCollection(stop); + out.writeOptionalFloat(temperature); + out.writeOptionalNamedWriteable(toolChoice); + out.writeOptionalCollection(tools); + out.writeOptionalFloat(topP); + } + + public record Message(Content content, String role, @Nullable String name, @Nullable String toolCallId, List<ToolCall> toolCalls) + implements + Writeable { + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser<Message, Void> PARSER = new ConstructingObjectParser<>( + Message.class.getSimpleName(), + args -> new Message((Content) args[0], (String) args[1], (String) args[2], (String) args[3], (List<ToolCall>) args[4]) + ); + + static { + PARSER.declareField(constructorArg(), (p, c) -> parseContent(p), new ParseField("content"), ObjectParser.ValueType.VALUE_ARRAY); + PARSER.declareString(constructorArg(), new ParseField("role")); + PARSER.declareString(optionalConstructorArg(), new ParseField("name")); + PARSER.declareString(optionalConstructorArg(), new ParseField("tool_call_id")); + PARSER.declareObjectArray(optionalConstructorArg(), ToolCall.PARSER::apply, new ParseField("tool_calls")); + } + + private static Content parseContent(XContentParser parser) throws IOException { + var token = parser.currentToken(); + if (token == XContentParser.Token.START_ARRAY) { + var parsedContentObjects = XContentParserUtils.parseList(parser, (p) -> ContentObject.PARSER.apply(p, null)); + return new ContentObjects(parsedContentObjects); + } else if (token == XContentParser.Token.VALUE_STRING) { + return ContentString.of(parser); + } + + throw new XContentParseException("Expected an array start token or a value string token but found token [" + token + "]"); + } + + public Message(StreamInput in) throws IOException { + this( + in.readNamedWriteable(Content.class), + 
in.readString(), + in.readOptionalString(), + in.readOptionalString(), + in.readOptionalCollectionAsList(ToolCall::new) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(content); + out.writeString(role); + out.writeOptionalString(name); + out.writeOptionalString(toolCallId); + out.writeOptionalCollection(toolCalls); + } + } + + public record ContentObjects(List contentObjects) implements Content, NamedWriteable { + + public static final String NAME = "content_objects"; + + public ContentObjects(StreamInput in) throws IOException { + this(in.readCollectionAsImmutableList(ContentObject::new)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(contentObjects); + } + + @Override + public String getWriteableName() { + return NAME; + } + } + + public record ContentObject(String text, String type) implements Writeable { + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + ContentObject.class.getSimpleName(), + args -> new ContentObject((String) args[0], (String) args[1]) + ); + + static { + PARSER.declareString(constructorArg(), new ParseField("text")); + PARSER.declareString(constructorArg(), new ParseField("type")); + } + + public ContentObject(StreamInput in) throws IOException { + this(in.readString(), in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(text); + out.writeString(type); + } + + public String toString() { + return text + ":" + type; + } + + } + + public record ContentString(String content) implements Content, NamedWriteable { + public static final String NAME = "content_string"; + + public static ContentString of(XContentParser parser) throws IOException { + var content = parser.text(); + return new ContentString(content); + } + + public ContentString(StreamInput in) throws IOException { + this(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(content); + } + + @Override + public String getWriteableName() { + return NAME; + } + + public String toString() { + return content; + } + } + + public record ToolCall(String id, FunctionField function, String type) implements Writeable { + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + ToolCall.class.getSimpleName(), + args -> new ToolCall((String) args[0], (FunctionField) args[1], (String) args[2]) + ); + + static { + PARSER.declareString(constructorArg(), new ParseField("id")); + PARSER.declareObject(constructorArg(), FunctionField.PARSER::apply, new ParseField("function")); + PARSER.declareString(constructorArg(), new ParseField("type")); + } + + public ToolCall(StreamInput in) throws IOException { + this(in.readString(), new FunctionField(in), in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + function.writeTo(out); + out.writeString(type); + } + + public record FunctionField(String arguments, String name) implements Writeable { + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "tool_call_function_field", + args -> new FunctionField((String) args[0], (String) args[1]) + ); + + static { + PARSER.declareString(constructorArg(), new ParseField("arguments")); + PARSER.declareString(constructorArg(), new ParseField("name")); + } + + public FunctionField(StreamInput in) throws IOException { + this(in.readString(), in.readString()); + } + 
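// Write order must mirror the stream constructor above: arguments first, then name.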
+ @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(arguments); + out.writeString(name); + } + } + } + + private static ToolChoice parseToolChoice(XContentParser parser) throws IOException { + var token = parser.currentToken(); + if (token == XContentParser.Token.START_OBJECT) { + return ToolChoiceObject.PARSER.apply(parser, null); + } else if (token == XContentParser.Token.VALUE_STRING) { + return ToolChoiceString.of(parser); + } + + throw new XContentParseException("Unsupported token [" + token + "]"); + } + + public sealed interface ToolChoice extends NamedWriteable permits ToolChoiceObject, ToolChoiceString {} + + public record ToolChoiceObject(String type, FunctionField function) implements ToolChoice, NamedWriteable { + + public static final String NAME = "tool_choice_object"; + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + ToolChoiceObject.class.getSimpleName(), + args -> new ToolChoiceObject((String) args[0], (FunctionField) args[1]) + ); + + static { + PARSER.declareString(constructorArg(), new ParseField("type")); + PARSER.declareObject(constructorArg(), FunctionField.PARSER::apply, new ParseField("function")); + } + + public ToolChoiceObject(StreamInput in) throws IOException { + this(in.readString(), new FunctionField(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(type); + function.writeTo(out); + } + + @Override + public String getWriteableName() { + return NAME; + } + + public record FunctionField(String name) implements Writeable { + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "tool_choice_function_field", + args -> new FunctionField((String) args[0]) + ); + + static { + PARSER.declareString(constructorArg(), new ParseField("name")); + } + + public FunctionField(StreamInput in) throws IOException { + this(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + } + } + } + + public record ToolChoiceString(String value) implements ToolChoice, NamedWriteable { + public static final String NAME = "tool_choice_string"; + + public static ToolChoiceString of(XContentParser parser) throws IOException { + var content = parser.text(); + return new ToolChoiceString(content); + } + + public ToolChoiceString(StreamInput in) throws IOException { + this(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + } + + @Override + public String getWriteableName() { + return NAME; + } + } + + public record Tool(String type, FunctionField function) implements Writeable { + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + Tool.class.getSimpleName(), + args -> new Tool((String) args[0], (FunctionField) args[1]) + ); + + static { + PARSER.declareString(constructorArg(), new ParseField("type")); + PARSER.declareObject(constructorArg(), FunctionField.PARSER::apply, new ParseField("function")); + } + + public Tool(StreamInput in) throws IOException { + this(in.readString(), new FunctionField(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(type); + function.writeTo(out); + } + + public record FunctionField( + @Nullable String description, + String name, + @Nullable Map parameters, + @Nullable Boolean strict + ) implements Writeable { + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser PARSER = 
new ConstructingObjectParser<>( + "tool_function_field", + args -> new FunctionField((String) args[0], (String) args[1], (Map) args[2], (Boolean) args[3]) + ); + + static { + PARSER.declareString(optionalConstructorArg(), new ParseField("description")); + PARSER.declareString(constructorArg(), new ParseField("name")); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.mapOrdered(), new ParseField("parameters")); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField("strict")); + } + + public FunctionField(StreamInput in) throws IOException { + this(in.readOptionalString(), in.readString(), in.readGenericMap(), in.readOptionalBoolean()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(description); + out.writeString(name); + out.writeGenericMap(parameters); + out.writeOptionalBoolean(strict); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java b/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java index aadda93f977b6..c7dc2c405ffba 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java @@ -122,7 +122,8 @@ public static LayerAndLoader ofUberModuleLoader(UberModuleClassLoader loader) { private final List moduleDescriptors; private final List pluginDescriptors; private final Map loadedPluginLayers; - private final Set allBundles; + private final Set moduleBundles; + private final Set pluginBundles; /** * Constructs a new PluginsLoader @@ -153,37 +154,36 @@ public static PluginsLoader createPluginsLoader(Path modulesDirectory, Path plug Set seenBundles = new LinkedHashSet<>(); // load (elasticsearch) module layers - List moduleDescriptors; + final Set modules; if (modulesDirectory != null) { try { - Set modules = PluginsUtils.getModuleBundles(modulesDirectory); - moduleDescriptors = modules.stream().map(PluginBundle::pluginDescriptor).toList(); + modules = PluginsUtils.getModuleBundles(modulesDirectory); seenBundles.addAll(modules); } catch (IOException ex) { throw new IllegalStateException("Unable to initialize modules", ex); } } else { - moduleDescriptors = Collections.emptyList(); + modules = Collections.emptySet(); } // load plugin layers - List pluginDescriptors; + final Set plugins; if (pluginsDirectory != null) { try { // TODO: remove this leniency, but tests bogusly rely on it if (isAccessibleDirectory(pluginsDirectory, logger)) { PluginsUtils.checkForFailedPluginRemovals(pluginsDirectory); - Set plugins = PluginsUtils.getPluginBundles(pluginsDirectory); - pluginDescriptors = plugins.stream().map(PluginBundle::pluginDescriptor).toList(); + plugins = PluginsUtils.getPluginBundles(pluginsDirectory); + seenBundles.addAll(plugins); } else { - pluginDescriptors = Collections.emptyList(); + plugins = Collections.emptySet(); } } catch (IOException ex) { throw new IllegalStateException("Unable to initialize plugins", ex); } } else { - pluginDescriptors = Collections.emptyList(); + plugins = Collections.emptySet(); } Map loadedPluginLayers = new LinkedHashMap<>(); @@ -197,19 +197,15 @@ public static PluginsLoader createPluginsLoader(Path modulesDirectory, Path plug } } - return new PluginsLoader(moduleDescriptors, pluginDescriptors, loadedPluginLayers, Set.copyOf(seenBundles)); + return new PluginsLoader(modules, plugins, loadedPluginLayers); } - PluginsLoader( - List moduleDescriptors, - List pluginDescriptors, - Map loadedPluginLayers, - Set allBundles - ) { - 
this.moduleDescriptors = moduleDescriptors; - this.pluginDescriptors = pluginDescriptors; + PluginsLoader(Set modules, Set plugins, Map loadedPluginLayers) { + this.moduleBundles = modules; + this.pluginBundles = plugins; + this.moduleDescriptors = modules.stream().map(PluginBundle::pluginDescriptor).toList(); + this.pluginDescriptors = plugins.stream().map(PluginBundle::pluginDescriptor).toList(); this.loadedPluginLayers = loadedPluginLayers; - this.allBundles = allBundles; } public List moduleDescriptors() { @@ -224,8 +220,12 @@ public Stream pluginLayers() { return loadedPluginLayers.values().stream().map(Function.identity()); } - public Set allBundles() { - return allBundles; + public Set moduleBundles() { + return moduleBundles; + } + + public Set pluginBundles() { + return pluginBundles; } private static void loadPluginLayer( @@ -416,7 +416,7 @@ static String toModuleName(String name) { return result; } - static final String toPackageName(String className) { + static String toPackageName(String className) { assert className.endsWith(".") == false; int index = className.lastIndexOf('.'); if (index == -1) { @@ -426,11 +426,11 @@ static final String toPackageName(String className) { } @SuppressForbidden(reason = "I need to convert URL's to Paths") - static final Path[] urlsToPaths(Set urls) { + static Path[] urlsToPaths(Set urls) { return urls.stream().map(PluginsLoader::uncheckedToURI).map(PathUtils::get).toArray(Path[]::new); } - static final URI uncheckedToURI(URL url) { + static URI uncheckedToURI(URL url) { try { return url.toURI(); } catch (URISyntaxException e) { diff --git a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java index f5670ebd8a543..e87e9ee85b29c 100644 --- a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.query.QueryParser; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionParser; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.search.SearchExtBuilder; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -128,6 +129,14 @@ default List> getQueries() { return emptyList(); } + /** + * @return Applicable {@link QueryRewriteInterceptor}s configured for this plugin. + * Note: This is internal to Elasticsearch's API and not extensible by external plugins. + */ + default List getQueryRewriteInterceptors() { + return emptyList(); + } + /** * The new {@link Aggregation}s added by this plugin. */ diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/rewriter/QueryRewriteInterceptor.java b/server/src/main/java/org/elasticsearch/plugins/internal/rewriter/QueryRewriteInterceptor.java new file mode 100644 index 0000000000000..8f4fb2ce7491a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/plugins/internal/rewriter/QueryRewriteInterceptor.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.plugins.internal.rewriter; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; + +import java.util.Map; + +/** + * Enables modules and plugins to intercept and rewrite queries during the query rewrite phase on the coordinator node. + */ +public interface QueryRewriteInterceptor { + + /** + * Intercepts and returns a rewritten query if modifications are required; otherwise, + * returns the same provided {@link QueryBuilder} instance unchanged. + * + * @param context the {@link QueryRewriteContext} providing the context for the rewrite operation + * @param queryBuilder the original {@link QueryBuilder} to potentially rewrite + * @return the rewritten {@link QueryBuilder}, or the original instance if no rewrite was needed + */ + QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder); + + /** + * Name of the query to be intercepted and rewritten. + */ + String getQueryName(); + + static QueryRewriteInterceptor multi(Map interceptors) { + return interceptors.isEmpty() ? new NoOpQueryRewriteInterceptor() : new CompositeQueryRewriteInterceptor(interceptors); + } + + class CompositeQueryRewriteInterceptor implements QueryRewriteInterceptor { + final String NAME = "composite"; + private final Map interceptors; + + private CompositeQueryRewriteInterceptor(Map interceptors) { + this.interceptors = interceptors; + } + + @Override + public String getQueryName() { + return NAME; + } + + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + QueryRewriteInterceptor interceptor = interceptors.get(queryBuilder.getName()); + if (interceptor != null) { + return interceptor.interceptAndRewrite(context, queryBuilder); + } + return queryBuilder; + } + } + + class NoOpQueryRewriteInterceptor implements QueryRewriteInterceptor { + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + return queryBuilder; + } + + @Override + public String getQueryName() { + return null; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java b/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java index 7b6ee6f7806c0..7bcdd523fd3d3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java @@ -40,6 +40,8 @@ private SearchCapabilities() {} private static final String RANK_VECTORS_SCRIPT_ACCESS = "rank_vectors_script_access"; /** Initial support for rank-vectors maxSim functions access. */ private static final String RANK_VECTORS_SCRIPT_MAX_SIM = "rank_vectors_script_max_sim_with_bugfix"; + /** Fixed the math in {@code moving_fn}'s {@code linearWeightedAvg}. 
*/ + private static final String MOVING_FN_RIGHT_MATH = "moving_fn_right_math"; private static final String RANDOM_SAMPLER_WITH_SCORED_SUBAGGS = "random_sampler_with_scored_subaggs"; private static final String OPTIMIZED_SCALAR_QUANTIZATION_BBQ = "optimized_scalar_quantization_bbq"; @@ -56,6 +58,7 @@ private SearchCapabilities() {} capabilities.add(RANDOM_SAMPLER_WITH_SCORED_SUBAGGS); capabilities.add(OPTIMIZED_SCALAR_QUANTIZATION_BBQ); capabilities.add(KNN_QUANTIZED_VECTOR_RESCORE); + capabilities.add(MOVING_FN_RIGHT_MATH); if (RankVectorsFieldMapper.FEATURE_FLAG.isEnabled()) { capabilities.add(RANK_VECTORS_FIELD_MAPPER); capabilities.add(RANK_VECTORS_SCRIPT_ACCESS); diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index e62e8e2e9fed9..338c9c1485b66 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -150,6 +150,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiFunction; +import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -559,16 +561,17 @@ public void executeQueryPhase(ShardSearchRequest request, SearchShardTask task, // check if we can shortcut the query phase entirely. if (orig.canReturnNullResponseIfMatchNoDocs()) { assert orig.scroll() == null; - final CanMatchShardResponse canMatchResp; - try { - ShardSearchRequest clone = new ShardSearchRequest(orig); - canMatchResp = canMatch(clone, false); - } catch (Exception exc) { - l.onFailure(exc); - return; - } + ShardSearchRequest clone = new ShardSearchRequest(orig); + CanMatchContext canMatchContext = new CanMatchContext( + clone, + indicesService::indexServiceSafe, + this::findReaderContext, + defaultKeepAlive, + maxKeepAlive + ); + CanMatchShardResponse canMatchResp = canMatch(canMatchContext, false); if (canMatchResp.canMatch() == false) { - l.onResponse(QuerySearchResult.nullInstance()); + listener.onResponse(QuerySearchResult.nullInstance()); return; } } @@ -1201,10 +1204,14 @@ public void freeAllScrollContexts() { } private long getKeepAlive(ShardSearchRequest request) { + return getKeepAlive(request, defaultKeepAlive, maxKeepAlive); + } + + private static long getKeepAlive(ShardSearchRequest request, long defaultKeepAlive, long maxKeepAlive) { if (request.scroll() != null) { - return getScrollKeepAlive(request.scroll()); + return getScrollKeepAlive(request.scroll(), defaultKeepAlive, maxKeepAlive); } else if (request.keepAlive() != null) { - checkKeepAliveLimit(request.keepAlive().millis()); + checkKeepAliveLimit(request.keepAlive().millis(), maxKeepAlive); return request.keepAlive().getMillis(); } else { return request.readerId() == null ? 
defaultKeepAlive : -1; @@ -1212,14 +1219,22 @@ private long getKeepAlive(ShardSearchRequest request) { } private long getScrollKeepAlive(Scroll scroll) { + return getScrollKeepAlive(scroll, defaultKeepAlive, maxKeepAlive); + } + + private static long getScrollKeepAlive(Scroll scroll, long defaultKeepAlive, long maxKeepAlive) { if (scroll != null && scroll.keepAlive() != null) { - checkKeepAliveLimit(scroll.keepAlive().millis()); + checkKeepAliveLimit(scroll.keepAlive().millis(), maxKeepAlive); return scroll.keepAlive().getMillis(); } return defaultKeepAlive; } private void checkKeepAliveLimit(long keepAlive) { + checkKeepAliveLimit(keepAlive, maxKeepAlive); + } + + private static void checkKeepAliveLimit(long keepAlive, long maxKeepAlive) { if (keepAlive > maxKeepAlive) { throw new IllegalArgumentException( "Keep alive for request (" @@ -1678,6 +1693,7 @@ public void canMatch(CanMatchNodeRequest request, ActionListener responses = new ArrayList<>(shardLevelRequests.size()); for (var shardLevelRequest : shardLevelRequests) { try { + // TODO remove the exception handling as it's now in canMatch itself responses.add(new CanMatchNodeResponse.ResponseOrFailure(canMatch(request.createShardSearchRequest(shardLevelRequest)))); } catch (Exception e) { responses.add(new CanMatchNodeResponse.ResponseOrFailure(e)); @@ -1689,82 +1705,145 @@ public void canMatch(CanMatchNodeRequest request, ActionListener indexServiceLookup; + private final BiFunction findReaderContext; + private final long defaultKeepAlive; + private final long maxKeepAlive; + + private IndexService indexService; + + CanMatchContext( + ShardSearchRequest request, + Function indexServiceLookup, + BiFunction findReaderContext, + long defaultKeepAlive, + long maxKeepAlive + ) { + this.request = request; + this.indexServiceLookup = indexServiceLookup; + this.findReaderContext = findReaderContext; + this.defaultKeepAlive = defaultKeepAlive; + this.maxKeepAlive = maxKeepAlive; + } + + long getKeepAlive() { + return SearchService.getKeepAlive(request, defaultKeepAlive, maxKeepAlive); + } + + ReaderContext findReaderContext() { + return findReaderContext.apply(request.readerId(), request); + } + + QueryRewriteContext getQueryRewriteContext(IndexService indexService) { + return indexService.newQueryRewriteContext(request::nowInMillis, request.getRuntimeMappings(), request.getClusterAlias()); + } + + SearchExecutionContext getSearchExecutionContext(Engine.Searcher searcher) { + return getIndexService().newSearchExecutionContext( + request.shardId().id(), + 0, + searcher, + request::nowInMillis, + request.getClusterAlias(), + request.getRuntimeMappings() + ); + } + + IndexShard getShard() { + return getIndexService().getShard(request.shardId().getId()); + } + + IndexService getIndexService() { + if (this.indexService == null) { + this.indexService = indexServiceLookup.apply(request.shardId().getIndex()); + } + return this.indexService; + } + } + + static CanMatchShardResponse canMatch(CanMatchContext canMatchContext, boolean checkRefreshPending) { + assert canMatchContext.request.searchType() == SearchType.QUERY_THEN_FETCH + : "unexpected search type: " + canMatchContext.request.searchType(); Releasable releasable = null; try { IndexService indexService; final boolean hasRefreshPending; final Engine.Searcher canMatchSearcher; - if (request.readerId() != null) { + if (canMatchContext.request.readerId() != null) { hasRefreshPending = false; ReaderContext readerContext; Engine.Searcher searcher; try { - readerContext = 
findReaderContext(request.readerId(), request); - releasable = readerContext.markAsUsed(getKeepAlive(request)); + readerContext = canMatchContext.findReaderContext(); + releasable = readerContext.markAsUsed(canMatchContext.getKeepAlive()); indexService = readerContext.indexService(); - if (canMatchAfterRewrite(request, indexService) == false) { + QueryRewriteContext queryRewriteContext = canMatchContext.getQueryRewriteContext(indexService); + if (queryStillMatchesAfterRewrite(canMatchContext.request, queryRewriteContext) == false) { return new CanMatchShardResponse(false, null); } searcher = readerContext.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); } catch (SearchContextMissingException e) { - final String searcherId = request.readerId().getSearcherId(); + final String searcherId = canMatchContext.request.readerId().getSearcherId(); if (searcherId == null) { - throw e; + return new CanMatchShardResponse(true, null); } - indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - if (canMatchAfterRewrite(request, indexService) == false) { + if (queryStillMatchesAfterRewrite( + canMatchContext.request, + canMatchContext.getQueryRewriteContext(canMatchContext.getIndexService()) + ) == false) { return new CanMatchShardResponse(false, null); } - IndexShard indexShard = indexService.getShard(request.shardId().getId()); - final Engine.SearcherSupplier searcherSupplier = indexShard.acquireSearcherSupplier(); + final Engine.SearcherSupplier searcherSupplier = canMatchContext.getShard().acquireSearcherSupplier(); if (searcherId.equals(searcherSupplier.getSearcherId()) == false) { searcherSupplier.close(); - throw e; + return new CanMatchShardResponse(true, null); } releasable = searcherSupplier; searcher = searcherSupplier.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); } canMatchSearcher = searcher; } else { - indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - if (canMatchAfterRewrite(request, indexService) == false) { + if (queryStillMatchesAfterRewrite( + canMatchContext.request, + canMatchContext.getQueryRewriteContext(canMatchContext.getIndexService()) + ) == false) { return new CanMatchShardResponse(false, null); } - IndexShard indexShard = indexService.getShard(request.shardId().getId()); - boolean needsWaitForRefresh = request.waitForCheckpoint() != UNASSIGNED_SEQ_NO; + boolean needsWaitForRefresh = canMatchContext.request.waitForCheckpoint() != UNASSIGNED_SEQ_NO; // If this request has wait_for_refresh behavior, it is safest to assume a refresh is pending. Theoretically, // this can be improved in the future by manually checking that the requested checkpoint has already been refreshed. // However, this would require modifying the engine to surface that information.
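// A pending refresh forces a conservative "can match" answer below, even when the rewritten query matches nothing.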
+ IndexShard indexShard = canMatchContext.getShard(); hasRefreshPending = needsWaitForRefresh || (indexShard.hasRefreshPending() && checkRefreshPending); canMatchSearcher = indexShard.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); } try (canMatchSearcher) { - SearchExecutionContext context = indexService.newSearchExecutionContext( - request.shardId().id(), - 0, - canMatchSearcher, - request::nowInMillis, - request.getClusterAlias(), - request.getRuntimeMappings() - ); - final boolean canMatch = queryStillMatchesAfterRewrite(request, context); - final MinAndMax minMax; + SearchExecutionContext context = canMatchContext.getSearchExecutionContext(canMatchSearcher); + final boolean canMatch = queryStillMatchesAfterRewrite(canMatchContext.request, context); if (canMatch || hasRefreshPending) { - FieldSortBuilder sortBuilder = FieldSortBuilder.getPrimaryFieldSortOrNull(request.source()); - minMax = sortBuilder != null ? FieldSortBuilder.getMinMaxOrNull(context, sortBuilder) : null; - } else { - minMax = null; + FieldSortBuilder sortBuilder = FieldSortBuilder.getPrimaryFieldSortOrNull(canMatchContext.request.source()); + final MinAndMax minMax = sortBuilder != null ? FieldSortBuilder.getMinMaxOrNull(context, sortBuilder) : null; + return new CanMatchShardResponse(true, minMax); } - return new CanMatchShardResponse(canMatch || hasRefreshPending, minMax); + return new CanMatchShardResponse(false, null); } + } catch (Exception e) { + return new CanMatchShardResponse(true, null); } finally { Releasables.close(releasable); } @@ -1777,15 +1856,6 @@ private CanMatchShardResponse canMatch(ShardSearchRequest request, boolean check * {@link MatchNoneQueryBuilder}. This allows us to avoid extra work for example making the shard search active and waiting for * refreshes. */ - private static boolean canMatchAfterRewrite(final ShardSearchRequest request, final IndexService indexService) throws IOException { - final QueryRewriteContext queryRewriteContext = indexService.newQueryRewriteContext( - request::nowInMillis, - request.getRuntimeMappings(), - request.getClusterAlias() - ); - return queryStillMatchesAfterRewrite(request, queryRewriteContext); - } - @SuppressWarnings("unchecked") public static boolean queryStillMatchesAfterRewrite(ShardSearchRequest request, QueryRewriteContext context) throws IOException { Rewriteable.rewrite(request.getRewriteable(), context, false); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java index 02e3c76e5e793..46584c171d16c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java @@ -100,7 +100,7 @@ public static double stdDev(double[] values, double avg) { */ public static double linearWeightedAvg(double[] values) { double avg = 0; - long totalWeight = 1; + long totalWeight = 0; long current = 1; for (double v : values) { @@ -110,7 +110,7 @@ public static double linearWeightedAvg(double[] values) { current += 1; } } - return totalWeight == 1 ? Double.NaN : avg / totalWeight; + return totalWeight == 0 ? 
Double.NaN : avg / totalWeight; } /** diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index 2cdb0a4879f87..22c31f6abc215 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -137,3 +137,4 @@ 8.15.5,8702003 8.16.0,8772001 8.16.1,8772004 +8.17.0,8797002 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index 58d0db5f6c3ef..2ee0b23bf050b 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -137,3 +137,4 @@ 8.15.5,8512000 8.16.0,8518000 8.16.1,8518000 +8.17.0,8521000 diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 725a4583d104a..71bf2a47cfa47 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -112,11 +112,7 @@ long buildTookInMillis() { } @Override - public void sendReleaseSearchContext( - ShardSearchContextId contextId, - Transport.Connection connection, - OriginalIndices originalIndices - ) { + public void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connection connection) { releasedContexts.add(contextId); } diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 7a38858d8477a..cf65d756811ad 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -155,7 +155,7 @@ protected void executePhaseOnShard( } @Override - public void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connection connection, OriginalIndices originalIndices) { + public void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connection connection) { releasedSearchContexts.add(contextId); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index b4ddd48172d01..2361beb7ad036 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -296,7 +296,11 @@ public void testFanOutAndCollect() throws InterruptedException { AtomicInteger numFreedContext = new AtomicInteger(); SearchTransportService transportService = new SearchTransportService(null, null, null) { @Override - public void sendFreeContext(Transport.Connection connection, ShardSearchContextId contextId, OriginalIndices originalIndices) { + public void sendFreeContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { numFreedContext.incrementAndGet(); assertTrue(nodeToContextMap.containsKey(connection.getNode())); assertTrue(nodeToContextMap.get(connection.getNode()).remove(contextId)); @@ -363,7 +367,7 @@ public void run() { for (int i = 0; i < results.getNumShards(); i++) { 
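// Verify each shard result came from the node that executed it, then release its search context.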
TestSearchPhaseResult result = results.getAtomicArray().get(i); assertEquals(result.node.getId(), result.getSearchShardTarget().getNodeId()); - sendReleaseSearchContext(result.getContextId(), new MockConnection(result.node), OriginalIndices.NONE); + sendReleaseSearchContext(result.getContextId(), new MockConnection(result.node)); } responseListener.onResponse(testResponse); if (latchTriggered.compareAndSet(false, true) == false) { @@ -421,8 +425,13 @@ public void testFanOutAndFail() throws InterruptedException { ); AtomicInteger numFreedContext = new AtomicInteger(); SearchTransportService transportService = new SearchTransportService(null, null, null) { + @Override - public void sendFreeContext(Transport.Connection connection, ShardSearchContextId contextId, OriginalIndices originalIndices) { + public void sendFreeContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { assertNotNull(contextId); numFreedContext.incrementAndGet(); assertTrue(nodeToContextMap.containsKey(connection.getNode())); diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index ed3d26141fe04..dcf64996e6617 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -1744,7 +1744,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { NodeClient client = new NodeClient(settings, threadPool); SearchService searchService = mock(SearchService.class); - when(searchService.getRewriteContext(any(), any(), any())).thenReturn(new QueryRewriteContext(null, null, null, null, null)); + when(searchService.getRewriteContext(any(), any(), any())).thenReturn( + new QueryRewriteContext(null, null, null, null, null, null) + ); ClusterService clusterService = new ClusterService( settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index 8036a964071d2..4abd0c4a9d469 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -690,7 +690,12 @@ public static Map randomInferenceFields() { } private static InferenceFieldMetadata randomInferenceFieldMetadata(String name) { - return new InferenceFieldMetadata(name, randomIdentifier(), randomSet(1, 5, ESTestCase::randomIdentifier).toArray(String[]::new)); + return new InferenceFieldMetadata( + name, + randomIdentifier(), + randomIdentifier(), + randomSet(1, 5, ESTestCase::randomIdentifier).toArray(String[]::new) + ); } private IndexMetadataStats randomIndexStats(int numberOfShards) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java index 8be0f4de15500..63e92835ba8db 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamServiceTests.java @@ -24,11 +24,13 @@ import java.io.IOException; import java.util.List; import java.util.Set; +import 
java.util.function.Function; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.generateMapping; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -425,6 +427,86 @@ public void testCreateDataStreamWithoutSuppliedWriteIndex() { assertThat(e.getMessage(), containsString("alias [" + dataStreamName + "] must specify a write index")); } + public void testSettingsVersion() throws IOException { + /* + * This tests that applyFailureStoreSettings updates the settings version when the settings have been modified, and does not change + * it otherwise. Incrementing the settings version when the settings have not changed can result in an assertion failing in + * IndexService::updateMetadata. + */ + String indexName = randomAlphaOfLength(30); + String dataStreamName = randomAlphaOfLength(50); + Function mapperSupplier = this::getMapperService; + boolean removeAlias = randomBoolean(); + boolean failureStore = randomBoolean(); + Settings nodeSettings = Settings.EMPTY; + + { + /* + * Here the input indexMetadata will have the index.hidden setting set to true. So we expect no change to the settings, and + * for the settings version to remain the same + */ + Metadata.Builder metadataBuilder = Metadata.builder(); + Settings indexMetadataSettings = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .build(); + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(indexMetadataSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .putMapping(getTestMappingWithTimestamp()) + .build(); + MetadataMigrateToDataStreamService.prepareBackingIndex( + metadataBuilder, + indexMetadata, + dataStreamName, + mapperSupplier, + removeAlias, + failureStore, + nodeSettings + ); + Metadata metadata = metadataBuilder.build(); + assertThat(indexMetadata.getSettings(), equalTo(metadata.index(indexName).getSettings())); + assertThat(metadata.index(indexName).getSettingsVersion(), equalTo(indexMetadata.getSettingsVersion())); + } + { + /* + * Here the input indexMetadata will not have the index.hidden setting set to true. So prepareBackingIndex will add that, + * meaning that the settings and settings version will change. 
+ */ + Metadata.Builder metadataBuilder = Metadata.builder(); + Settings indexMetadataSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build(); + IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + .settings(indexMetadataSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .putMapping(getTestMappingWithTimestamp()) + .build(); + MetadataMigrateToDataStreamService.prepareBackingIndex( + metadataBuilder, + indexMetadata, + dataStreamName, + mapperSupplier, + removeAlias, + failureStore, + nodeSettings + ); + Metadata metadata = metadataBuilder.build(); + assertThat(indexMetadata.getSettings(), not(equalTo(metadata.index(indexName).getSettings()))); + assertThat(metadata.index(indexName).getSettingsVersion(), equalTo(indexMetadata.getSettingsVersion() + 1)); + } + } + + private String getTestMappingWithTimestamp() { + return """ + { + "properties": { + "@timestamp": {"type": "date"} + } + } + """; + } + private MapperService getMapperService(IndexMetadata im) { try { return createMapperService("{\"_doc\": " + im.mapping().source().toString() + "}"); diff --git a/server/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java b/server/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java index 4bea6f50c7c4b..b982bd7b95aad 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterIndexInput; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; @@ -267,17 +268,47 @@ public void testSkipBytes() throws Exception { skipBytesExpected ); - IndexInput input = dir.openInput("test", IOContext.DEFAULT); - InputStreamIndexInput is = new InputStreamIndexInput(input, limit); + var countingInput = new CountingReadBytesIndexInput("test", dir.openInput("test", IOContext.DEFAULT)); + InputStreamIndexInput is = new InputStreamIndexInput(countingInput, limit); is.readNBytes(initialReadBytes); assertThat(is.skip(skipBytes), equalTo((long) skipBytesExpected)); + long expectedActualInitialBytesRead = Math.min(Math.min(initialReadBytes, limit), bytes); + assertThat(countingInput.getBytesRead(), equalTo(expectedActualInitialBytesRead)); int remainingBytes = Math.min(bytes, limit) - seekExpected; for (int i = seekExpected; i < seekExpected + remainingBytes; i++) { assertThat(is.read(), equalTo(i)); } + assertThat(countingInput.getBytesRead(), equalTo(expectedActualInitialBytesRead + remainingBytes)); } + protected static class CountingReadBytesIndexInput extends FilterIndexInput { + private long bytesRead = 0; + + public CountingReadBytesIndexInput(String resourceDescription, IndexInput in) { + super(resourceDescription, in); + } + + @Override + public byte readByte() throws IOException { + long filePointerBefore = getFilePointer(); + byte b = super.readByte(); + bytesRead += getFilePointer() - filePointerBefore; + return b; + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + long filePointerBefore = getFilePointer(); + super.readBytes(b, offset, len); + bytesRead += getFilePointer() - filePointerBefore; + } + + public long getBytesRead() { + return bytesRead; + } + }; + public void 
testReadZeroShouldReturnZero() throws IOException { try (Directory dir = new ByteBuffersDirectory()) { try (IndexOutput output = dir.createOutput("test", IOContext.DEFAULT)) { diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 49a4d519c0ea4..c519d4834148d 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -86,6 +86,7 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.internal.rewriter.MockQueryRewriteInterceptor; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.test.ClusterServiceUtils; @@ -223,7 +224,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { module.indexSettings().getMode().idFieldMapperWithoutFieldData(), null, indexDeletionListener, - emptyMap() + emptyMap(), + new MockQueryRewriteInterceptor() ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java index 809fb161fcbe5..b1470c1ee5b3b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; -import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.Map; @@ -94,6 +93,7 @@ private static class TestInferenceFieldMapper extends FieldMapper implements Inf public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n)); public static final String INFERENCE_ID = "test_inference_id"; + public static final String SEARCH_INFERENCE_ID = "test_search_inference_id"; public static final String CONTENT_TYPE = "test_inference_field"; TestInferenceFieldMapper(String simpleName) { @@ -102,7 +102,7 @@ private static class TestInferenceFieldMapper extends FieldMapper implements Inf @Override public InferenceFieldMetadata getMetadata(Set sourcePaths) { - return new InferenceFieldMetadata(fullPath(), INFERENCE_ID, sourcePaths.toArray(new String[0])); + return new InferenceFieldMetadata(fullPath(), INFERENCE_ID, SEARCH_INFERENCE_ID, sourcePaths.toArray(new String[0])); } @Override @@ -111,7 +111,7 @@ public Object getOriginalValue(Map sourceAsMap) { } @Override - protected void parseCreateField(DocumentParserContext context) throws IOException {} + protected void parseCreateField(DocumentParserContext context) {} @Override public Builder getMergeBuilder() { diff --git a/server/src/test/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapperTests.java b/server/src/test/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapperTests.java new file mode 100644 index 0000000000000..6c570e0e71725 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/query/InterceptedQueryBuilderWrapperTests.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +public class InterceptedQueryBuilderWrapperTests extends ESTestCase { + + private TestThreadPool threadPool; + private NoOpClient client; + + @Before + public void setup() { + threadPool = createThreadPool(); + client = new NoOpClient(threadPool); + } + + @After + public void cleanup() { + threadPool.close(); + } + + public void testQueryNameReturnsWrappedQueryBuilder() { + MatchAllQueryBuilder matchAllQueryBuilder = new MatchAllQueryBuilder(); + InterceptedQueryBuilderWrapper interceptedQueryBuilderWrapper = new InterceptedQueryBuilderWrapper(matchAllQueryBuilder); + String queryName = randomAlphaOfLengthBetween(5, 10); + QueryBuilder namedQuery = interceptedQueryBuilderWrapper.queryName(queryName); + assertTrue(namedQuery instanceof InterceptedQueryBuilderWrapper); + assertEquals(queryName, namedQuery.queryName()); + } + + public void testQueryBoostReturnsWrappedQueryBuilder() { + MatchAllQueryBuilder matchAllQueryBuilder = new MatchAllQueryBuilder(); + InterceptedQueryBuilderWrapper interceptedQueryBuilderWrapper = new InterceptedQueryBuilderWrapper(matchAllQueryBuilder); + float boost = randomFloat(); + QueryBuilder boostedQuery = interceptedQueryBuilderWrapper.boost(boost); + assertTrue(boostedQuery instanceof InterceptedQueryBuilderWrapper); + assertEquals(boost, boostedQuery.boost(), 0.0001f); + } + + public void testRewrite() throws IOException { + QueryRewriteContext context = new QueryRewriteContext(null, client, null); + context.setQueryRewriteInterceptor(myMatchInterceptor); + + // Queries that are not intercepted behave normally + TermQueryBuilder termQueryBuilder = new TermQueryBuilder("field", "value"); + QueryBuilder rewritten = termQueryBuilder.rewrite(context); + assertTrue(rewritten instanceof TermQueryBuilder); + + // Queries that should be intercepted are and the right thing happens + MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder("field", "value"); + rewritten = matchQueryBuilder.rewrite(context); + assertTrue(rewritten instanceof InterceptedQueryBuilderWrapper); + assertTrue(((InterceptedQueryBuilderWrapper) rewritten).queryBuilder instanceof MatchQueryBuilder); + MatchQueryBuilder rewrittenMatchQueryBuilder = (MatchQueryBuilder) ((InterceptedQueryBuilderWrapper) rewritten).queryBuilder; + assertEquals("intercepted", rewrittenMatchQueryBuilder.value()); + + // An additional rewrite on an already intercepted query returns the same query + QueryBuilder rewrittenAgain = rewritten.rewrite(context); + assertTrue(rewrittenAgain instanceof InterceptedQueryBuilderWrapper); + assertEquals(rewritten, rewrittenAgain); + } + + private final QueryRewriteInterceptor myMatchInterceptor = new QueryRewriteInterceptor() { + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + if (queryBuilder instanceof MatchQueryBuilder 
matchQueryBuilder) { + return new MatchQueryBuilder(matchQueryBuilder.fieldName(), "intercepted"); + } + return queryBuilder; + } + + @Override + public String getQueryName() { + return MatchQueryBuilder.NAME; + } + }; +} diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java index d07bcf54fdf09..5dd231ab97886 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java @@ -52,6 +52,7 @@ public void testGetTierPreference() { null, null, null, + null, null ); @@ -79,6 +80,7 @@ public void testGetTierPreference() { null, null, null, + null, null ); diff --git a/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java b/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java index c31a68f36de71..256d0f269edb4 100644 --- a/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java +++ b/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java @@ -155,9 +155,10 @@ protected InputStream openSlice(int slice) throws IOException { // Mark input.mark(randomNonNegativeInt()); + int slicesOpenedAtMark = streamsOpened.size(); // Read or skip up to another random point - final int moreBytes = randomIntBetween(0, bytes.length - mark); + int moreBytes = randomIntBetween(0, bytes.length - mark); if (moreBytes > 0) { if (randomBoolean()) { final var moreBytesRead = new byte[moreBytes]; @@ -171,11 +172,13 @@ protected InputStream openSlice(int slice) throws IOException { // Randomly read to EOF if (randomBoolean()) { - input.readAllBytes(); + moreBytes += input.readAllBytes().length; } // Reset input.reset(); + int slicesOpenedAfterReset = streamsOpened.size(); + assert moreBytes > 0 || mark == 0 || slicesOpenedAfterReset == slicesOpenedAtMark : "Reset at mark should not re-open slices"; // Read all remaining bytes, which should be the bytes from mark up to the end final int remainingBytes = bytes.length - mark; diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsLoaderTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsLoaderTests.java index 059cb15551acb..b7d63b7d612c9 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsLoaderTests.java @@ -9,12 +9,45 @@ package org.elasticsearch.plugins; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.elasticsearch.Version; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.plugin.analysis.CharFilterFactory; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.PrivilegedOperations; +import org.elasticsearch.test.compiler.InMemoryJavaCompiler; +import org.elasticsearch.test.jar.JarUtils; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.URLClassLoader; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Map; + +import static java.util.Map.entry; +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.hamcrest.Matchers.contains; import static 
org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +@ESTestCase.WithoutSecurityManager +@LuceneTestCase.SuppressFileSystems(value = "ExtrasFS") public class PluginsLoaderTests extends ESTestCase { + private static final Logger logger = LogManager.getLogger(PluginsLoaderTests.class); + + static PluginsLoader newPluginsLoader(Settings settings) { + return PluginsLoader.createPluginsLoader(null, TestEnvironment.newEnvironment(settings).pluginsFile(), false); + } + public void testToModuleName() { assertThat(PluginsLoader.toModuleName("module.name"), equalTo("module.name")); assertThat(PluginsLoader.toModuleName("module-name"), equalTo("module.name")); @@ -28,4 +61,220 @@ public void testToModuleName() { assertThat(PluginsLoader.toModuleName("_module_name"), equalTo("_module_name")); assertThat(PluginsLoader.toModuleName("_"), equalTo("_")); } + + public void testStablePluginLoading() throws Exception { + final Path home = createTempDir(); + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build(); + final Path plugins = home.resolve("plugins"); + final Path plugin = plugins.resolve("stable-plugin"); + Files.createDirectories(plugin); + PluginTestUtil.writeStablePluginProperties( + plugin, + "description", + "description", + "name", + "stable-plugin", + "version", + "1.0.0", + "elasticsearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version") + ); + + Path jar = plugin.resolve("impl.jar"); + JarUtils.createJarWithEntries(jar, Map.of("p/A.class", InMemoryJavaCompiler.compile("p.A", """ + package p; + import java.util.Map; + import org.elasticsearch.plugin.analysis.CharFilterFactory; + import org.elasticsearch.plugin.NamedComponent; + import java.io.Reader; + @NamedComponent( "a_name") + public class A implements CharFilterFactory { + @Override + public Reader create(Reader reader) { + return reader; + } + } + """))); + Path namedComponentFile = plugin.resolve("named_components.json"); + Files.writeString(namedComponentFile, """ + { + "org.elasticsearch.plugin.analysis.CharFilterFactory": { + "a_name": "p.A" + } + } + """); + + var pluginsLoader = newPluginsLoader(settings); + try { + var loadedLayers = pluginsLoader.pluginLayers().toList(); + + assertThat(loadedLayers, hasSize(1)); + assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().getName(), equalTo("stable-plugin")); + assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().isStable(), is(true)); + + assertThat(pluginsLoader.pluginDescriptors(), hasSize(1)); + assertThat(pluginsLoader.pluginDescriptors().get(0).getName(), equalTo("stable-plugin")); + assertThat(pluginsLoader.pluginDescriptors().get(0).isStable(), is(true)); + + var pluginClassLoader = loadedLayers.get(0).pluginClassLoader(); + var pluginModuleLayer = loadedLayers.get(0).pluginModuleLayer(); + assertThat(pluginClassLoader, instanceOf(UberModuleClassLoader.class)); + assertThat(pluginModuleLayer, is(not(ModuleLayer.boot()))); + assertThat(pluginModuleLayer.modules(), contains(transformedMatch(Module::getName, equalTo("synthetic.stable.plugin")))); + + if (CharFilterFactory.class.getModule().isNamed() == false) { + // test frameworks run with stable api classes on classpath, so we + // have no choice but to let our class read the unnamed module that + // owns the stable api classes + 
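// (When CharFilterFactory is in a named module, this branch is skipped and no extra read edge is added.)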
((UberModuleClassLoader) pluginClassLoader).addReadsSystemClassLoaderUnnamedModule(); + } + + Class<?> stableClass = pluginClassLoader.loadClass("p.A"); + assertThat(stableClass.getModule().getName(), equalTo("synthetic.stable.plugin")); + } finally { + closePluginLoaders(pluginsLoader); + } + } + + public void testModularPluginLoading() throws Exception { + final Path home = createTempDir(); + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build(); + final Path plugins = home.resolve("plugins"); + final Path plugin = plugins.resolve("modular-plugin"); + Files.createDirectories(plugin); + PluginTestUtil.writePluginProperties( + plugin, + "description", + "description", + "name", + "modular-plugin", + "classname", + "p.A", + "modulename", + "modular.plugin", + "version", + "1.0.0", + "elasticsearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version") + ); + + Path jar = plugin.resolve("impl.jar"); + Map<String, CharSequence> sources = Map.ofEntries(entry("module-info", "module modular.plugin { exports p; }"), entry("p.A", """ + package p; + import org.elasticsearch.plugins.Plugin; + + public class A extends Plugin { + } + """)); + + // Usually org.elasticsearch.plugins.Plugin would be in the org.elasticsearch.server module. + // Unfortunately, as tests run non-modular, it will be in the unnamed module, so we need to add a read for it. + var classToBytes = InMemoryJavaCompiler.compile(sources, "--add-reads", "modular.plugin=ALL-UNNAMED"); + + JarUtils.createJarWithEntries( + jar, + Map.ofEntries(entry("module-info.class", classToBytes.get("module-info")), entry("p/A.class", classToBytes.get("p.A"))) + ); + + var pluginsLoader = newPluginsLoader(settings); + try { + var loadedLayers = pluginsLoader.pluginLayers().toList(); + + assertThat(loadedLayers, hasSize(1)); + assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().getName(), equalTo("modular-plugin")); + assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().isStable(), is(false)); + assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().isModular(), is(true)); + + assertThat(pluginsLoader.pluginDescriptors(), hasSize(1)); + assertThat(pluginsLoader.pluginDescriptors().get(0).getName(), equalTo("modular-plugin")); + assertThat(pluginsLoader.pluginDescriptors().get(0).isModular(), is(true)); + + var pluginModuleLayer = loadedLayers.get(0).pluginModuleLayer(); + assertThat(pluginModuleLayer, is(not(ModuleLayer.boot()))); + assertThat(pluginModuleLayer.modules(), contains(transformedMatch(Module::getName, equalTo("modular.plugin")))); + } finally { + closePluginLoaders(pluginsLoader); + } + } + + public void testNonModularPluginLoading() throws Exception { + final Path home = createTempDir(); + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build(); + final Path plugins = home.resolve("plugins"); + final Path plugin = plugins.resolve("non-modular-plugin"); + Files.createDirectories(plugin); + PluginTestUtil.writePluginProperties( + plugin, + "description", + "description", + "name", + "non-modular-plugin", + "classname", + "p.A", + "version", + "1.0.0", + "elasticsearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version") + ); + + Path jar = plugin.resolve("impl.jar"); + Map<String, CharSequence> sources = Map.ofEntries(entry("p.A", """ + package p; + import org.elasticsearch.plugins.Plugin; + + public class A extends Plugin { + } + """)); + + var
classToBytes = InMemoryJavaCompiler.compile(sources); + + JarUtils.createJarWithEntries(jar, Map.ofEntries(entry("p/A.class", classToBytes.get("p.A")))); + + var pluginsLoader = newPluginsLoader(settings); + try { + var loadedLayers = pluginsLoader.pluginLayers().toList(); + + assertThat(loadedLayers, hasSize(1)); + assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().getName(), equalTo("non-modular-plugin")); + assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().isStable(), is(false)); + assertThat(loadedLayers.get(0).pluginBundle().pluginDescriptor().isModular(), is(false)); + + assertThat(pluginsLoader.pluginDescriptors(), hasSize(1)); + assertThat(pluginsLoader.pluginDescriptors().get(0).getName(), equalTo("non-modular-plugin")); + assertThat(pluginsLoader.pluginDescriptors().get(0).isModular(), is(false)); + + var pluginModuleLayer = loadedLayers.get(0).pluginModuleLayer(); + assertThat(pluginModuleLayer, is(ModuleLayer.boot())); + } finally { + closePluginLoaders(pluginsLoader); + } + } + + // Closes the URLClassLoaders and UberModuleClassloaders created by the given plugin loader. + // We can use the direct ClassLoader from the plugin because tests do not use any parent SPI ClassLoaders. + static void closePluginLoaders(PluginsLoader pluginsLoader) { + pluginsLoader.pluginLayers().forEach(lp -> { + if (lp.pluginClassLoader() instanceof URLClassLoader urlClassLoader) { + try { + PrivilegedOperations.closeURLClassLoader(urlClassLoader); + } catch (IOException unexpected) { + throw new UncheckedIOException(unexpected); + } + } else if (lp.pluginClassLoader() instanceof UberModuleClassLoader loader) { + try { + PrivilegedOperations.closeURLClassLoader(loader.getInternalLoader()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } else { + logger.info("Cannot close unexpected classloader " + lp.pluginClassLoader()); + } + }); + } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java new file mode 100644 index 0000000000000..1f2d129f0293c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java @@ -0,0 +1,3011 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ +package org.elasticsearch.search; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterDirectoryReader; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHitCountCollectorManager; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.ClosePointInTimeRequest; +import org.elasticsearch.action.search.OpenPointInTimeRequest; +import org.elasticsearch.action.search.SearchPhaseController; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.SearchShardTask; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.search.stats.SearchStats; 
+import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.SearchOperationListener; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.settings.InternalOrPrivateSettingsPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.SearchService.ResultsType; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregationReduceContext; +import org.elasticsearch.search.aggregations.MultiBucketConsumerService; +import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.ShardFetchRequest; +import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.fetch.subphase.FieldAndFormat; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.search.internal.ReaderContext; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.query.NonCountingTermQuery; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.SearchTimeoutException; +import org.elasticsearch.search.rank.RankBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.rank.RankShardResult; +import org.elasticsearch.search.rank.TestRankBuilder; +import org.elasticsearch.search.rank.TestRankShardResult; +import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankShardContext; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; +import org.elasticsearch.search.rank.feature.RankFeatureResult; +import org.elasticsearch.search.rank.feature.RankFeatureShardRequest; +import org.elasticsearch.search.rank.feature.RankFeatureShardResult; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.tasks.TaskCancelHelper; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.threadpool.ThreadPool; +import 
org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.LinkedList; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.IntConsumer; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; +import static org.elasticsearch.search.SearchService.DEFAULT_SIZE; +import static org.elasticsearch.search.SearchService.QUERY_PHASE_PARALLEL_COLLECTION_ENABLED; +import static org.elasticsearch.search.SearchService.SEARCH_WORKER_THREADS_ENABLED; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.startsWith; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; + +public class SearchServiceSingleNodeTests extends ESSingleNodeTestCase { + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return pluginList( + FailOnRewriteQueryPlugin.class, + CustomScriptPlugin.class, + ReaderWrapperCountPlugin.class, + InternalOrPrivateSettingsPlugin.class, + MockSearchService.TestPlugin.class + ); + } + + public static class ReaderWrapperCountPlugin extends Plugin { + @Override + public void onIndexModule(IndexModule indexModule) { + indexModule.setReaderWrapper(service -> SearchServiceSingleNodeTests::apply); + } + } + + @Before + public void resetCount() { + numWrapInvocations = new AtomicInteger(0); + } + + private static AtomicInteger numWrapInvocations = new AtomicInteger(0); + + private static DirectoryReader apply(DirectoryReader directoryReader) throws IOException { + numWrapInvocations.incrementAndGet(); + return new FilterDirectoryReader(directoryReader, new FilterDirectoryReader.SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader reader) { + return reader; + } + }) { + @Override + protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { + return in; + } + + @Override + public
CacheHelper getReaderCacheHelper() { + return directoryReader.getReaderCacheHelper(); + } + }; + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + + static final String DUMMY_SCRIPT = "dummyScript"; + + @Override + protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { + return Collections.singletonMap(DUMMY_SCRIPT, vars -> "dummy"); + } + + @Override + public void onIndexModule(IndexModule indexModule) { + indexModule.addSearchOperationListener(new SearchOperationListener() { + @Override + public void onFetchPhase(SearchContext context, long tookInNanos) { + if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) { + assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]")); + } else { + assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]")); + } + } + + @Override + public void onQueryPhase(SearchContext context, long tookInNanos) { + if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) { + assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]")); + } else { + assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]")); + } + } + }); + } + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put("search.default_search_timeout", "5s").build(); + } + + public void testClearOnClose() { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertResponse( + client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), + searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue())) + ); + SearchService service = getInstanceFromNode(SearchService.class); + + assertEquals(1, service.getActiveContexts()); + service.doClose(); // this kills the keep-alive reaper; we have to reset the node after this test + assertEquals(0, service.getActiveContexts()); + } + + public void testClearOnStop() { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertResponse( + client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), + searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue())) + ); + SearchService service = getInstanceFromNode(SearchService.class); + + assertEquals(1, service.getActiveContexts()); + service.doStop(); + assertEquals(0, service.getActiveContexts()); + } + + public void testClearIndexDelete() { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertResponse( + client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), + searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue())) + ); + SearchService service = getInstanceFromNode(SearchService.class); + + assertEquals(1, service.getActiveContexts()); + assertAcked(indicesAdmin().prepareDelete("index")); + awaitIndexShardCloseAsyncTasks(); + assertEquals(0, service.getActiveContexts()); + } + + public void testCloseSearchContextOnRewriteException() { + // if refresh happens while checking the exception, the subsequent reference count might not match, so we switch it off + createIndex("index", Settings.builder().put("index.refresh_interval", -1).build()); + prepareIndex("index").setId("1").setSource("field",
"value").setRefreshPolicy(IMMEDIATE).get(); + + SearchService service = getInstanceFromNode(SearchService.class); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + + final int activeContexts = service.getActiveContexts(); + final int activeRefs = indexShard.store().refCount(); + expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("index").setQuery(new FailOnRewriteQueryBuilder()).get() + ); + assertEquals(activeContexts, service.getActiveContexts()); + assertEquals(activeRefs, indexShard.store().refCount()); + } + + public void testSearchWhileIndexDeleted() throws InterruptedException { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + SearchService service = getInstanceFromNode(SearchService.class); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + AtomicBoolean running = new AtomicBoolean(true); + CountDownLatch startGun = new CountDownLatch(1); + final int permitCount = 100; + Semaphore semaphore = new Semaphore(permitCount); + ShardRouting routing = TestShardRouting.newShardRouting( + indexShard.shardId(), + randomAlphaOfLength(5), + randomBoolean(), + ShardRoutingState.INITIALIZING + ); + final Thread thread = new Thread(() -> { + startGun.countDown(); + while (running.get()) { + if (randomBoolean()) { + service.afterIndexRemoved(indexService.index(), indexService.getIndexSettings(), DELETED); + } else { + service.beforeIndexShardCreated(routing, indexService.getIndexSettings().getSettings()); + } + if (randomBoolean()) { + // here we trigger some refreshes to ensure the IR go out of scope such that we hit ACE if we access a search + // context in a non-sane way. + try { + semaphore.acquire(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + prepareIndex("index").setSource("field", "value") + .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())) + .execute(ActionListener.running(semaphore::release)); + } + } + }); + thread.start(); + startGun.await(); + try { + final int rounds = scaledRandomIntBetween(100, 10000); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchRequest scrollSearchRequest = new SearchRequest().allowPartialSearchResults(true) + .scroll(new Scroll(TimeValue.timeValueMinutes(1))); + for (int i = 0; i < rounds; i++) { + try { + try { + PlainActionFuture result = new PlainActionFuture<>(); + final boolean useScroll = randomBoolean(); + service.executeQueryPhase( + new ShardSearchRequest( + OriginalIndices.NONE, + useScroll ? 
scrollSearchRequest : searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ), + new SearchShardTask(123L, "", "", "", null, emptyMap()), + result.delegateFailure((l, r) -> { + r.incRef(); + l.onResponse(r); + }) + ); + final SearchPhaseResult searchPhaseResult = result.get(); + try { + List<Integer> intCursors = new ArrayList<>(1); + intCursors.add(0); + ShardFetchRequest req = new ShardFetchRequest( + searchPhaseResult.getContextId(), + intCursors, + null/* not a scroll */ + ); + PlainActionFuture<FetchSearchResult> listener = new PlainActionFuture<>(); + service.executeFetchPhase(req, new SearchShardTask(123L, "", "", "", null, emptyMap()), listener); + listener.get(); + if (useScroll) { + // have to free context since this test does not remove the index from IndicesService. + service.freeReaderContext(searchPhaseResult.getContextId()); + } + } finally { + searchPhaseResult.decRef(); + } + } catch (ExecutionException ex) { + assertThat(ex.getCause(), instanceOf(RuntimeException.class)); + throw ((RuntimeException) ex.getCause()); + } + } catch (AlreadyClosedException ex) { + throw ex; + } catch (IllegalStateException ex) { + assertEquals(AbstractRefCounted.ALREADY_CLOSED_MESSAGE, ex.getMessage()); + } catch (SearchContextMissingException ex) { + // that's fine + } + } + } finally { + running.set(false); + thread.join(); + semaphore.acquire(permitCount); + } + + assertEquals(0, service.getActiveContexts()); + + SearchStats.Stats totalStats = indexShard.searchStats().getTotal(); + assertEquals(0, totalStats.getQueryCurrent()); + assertEquals(0, totalStats.getScrollCurrent()); + assertEquals(0, totalStats.getFetchCurrent()); + } + + public void testRankFeaturePhaseSearchPhases() throws InterruptedException, ExecutionException { + final String indexName = "index"; + final String rankFeatureFieldName = "field"; + final String searchFieldName = "search_field"; + final String searchFieldValue = "some_value"; + final String fetchFieldName = "fetch_field"; + final String fetchFieldValue = "fetch_value"; + + final int minDocs = 3; + final int maxDocs = 10; + int numDocs = between(minDocs, maxDocs); + createIndex(indexName); + // index some documents + for (int i = 0; i < numDocs; i++) { + prepareIndex(indexName).setId(String.valueOf(i)) + .setSource( + rankFeatureFieldName, + "aardvark_" + i, + searchFieldName, + searchFieldValue, + fetchFieldName, + fetchFieldValue + "_" + i + ) + .get(); + } + indicesAdmin().prepareRefresh(indexName).get(); + + final SearchService service = getInstanceFromNode(SearchService.class); + + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex(indexName)); + final IndexShard indexShard = indexService.getShard(0); + SearchShardTask searchTask = new SearchShardTask(123L, "", "", "", null, emptyMap()); + + // create a SearchRequest that returns all documents and defines a TestRankBuilder with shard-level only operations + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true) + .source( + new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) + .size(DEFAULT_SIZE) + .fetchField(fetchFieldName) + .rankBuilder( + // here we override only the shard-level contexts + new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List<Query> queries, int from) { + return new QueryPhaseRankShardContext(queries, from) { + +
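+ // shard-level context: collects the rank window of docs for this shard and folds them into a single RankShardResult in combineQueryPhaseResults below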
@Override + public int rankWindowSize() { + return DEFAULT_RANK_WINDOW_SIZE; + } + + @Override + public RankShardResult combineQueryPhaseResults(List<TopDocs> rankResults) { + // we know we have just 1 query, so return all the docs from it + return new TestRankShardResult( + Arrays.stream(rankResults.get(0).scoreDocs) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) + .limit(rankWindowSize()) + .toArray(RankDoc[]::new) + ); + } + }; + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { + @Override + public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { + RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; + for (int i = 0; i < hits.getHits().length; i++) { + SearchHit hit = hits.getHits()[i]; + rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); + rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); + rankFeatureDocs[i].score = (numDocs - i) + randomFloat(); + rankFeatureDocs[i].rank = i + 1; + } + return new RankFeatureShardResult(rankFeatureDocs); + } + }; + } + } + ) + ); + + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + QuerySearchResult queryResult = null; + RankFeatureResult rankResult = null; + try { + // Execute the query phase and store the result in a SearchPhaseResult container using a PlainActionFuture + PlainActionFuture<SearchPhaseResult> queryPhaseResults = new PlainActionFuture<>(); + service.executeQueryPhase(request, searchTask, queryPhaseResults); + queryResult = (QuerySearchResult) queryPhaseResults.get(); + + // these are the matched docs from the query phase + final RankDoc[] queryRankDocs = ((TestRankShardResult) queryResult.getRankShardResult()).testRankDocs; + + // assume that we have cut down to these from the coordinator node as the top-docs to run the rank feature phase upon + List<Integer> topRankWindowSizeDocs = randomNonEmptySubsetOf(Arrays.stream(queryRankDocs).map(x -> x.doc).toList()); + + // now we create a RankFeatureShardRequest to extract feature info for the top-docs above + RankFeatureShardRequest rankFeatureShardRequest = new RankFeatureShardRequest( + OriginalIndices.NONE, + queryResult.getContextId(), // use the context from the query phase + request, + topRankWindowSizeDocs + ); + PlainActionFuture<RankFeatureResult> rankPhaseResults = new PlainActionFuture<>(); + service.executeRankFeaturePhase(rankFeatureShardRequest, searchTask, rankPhaseResults); + rankResult = rankPhaseResults.get(); + + assertNotNull(rankResult); + assertNotNull(rankResult.rankFeatureResult()); + RankFeatureShardResult rankFeatureShardResult = rankResult.rankFeatureResult().shardResult(); + assertNotNull(rankFeatureShardResult); + + List<Integer> sortedRankWindowDocs = topRankWindowSizeDocs.stream().sorted().toList(); + assertEquals(sortedRankWindowDocs.size(), rankFeatureShardResult.rankFeatureDocs.length); + for (int i = 0; i < sortedRankWindowDocs.size(); i++) { + assertEquals((long) sortedRankWindowDocs.get(i), rankFeatureShardResult.rankFeatureDocs[i].doc); + assertEquals(rankFeatureShardResult.rankFeatureDocs[i].featureData, "aardvark_" + sortedRankWindowDocs.get(i)); + } + + List<Integer> globalTopKResults = randomNonEmptySubsetOf( + Arrays.stream(rankFeatureShardResult.rankFeatureDocs).map(x -> x.doc).toList() + ); + + // finally let's create a fetch request to bring back fetch
info for the top results + ShardFetchSearchRequest fetchRequest = new ShardFetchSearchRequest( + OriginalIndices.NONE, + rankResult.getContextId(), + request, + globalTopKResults, + null, + null, + rankResult.getRescoreDocIds(), + null + ); + + // execute fetch phase and perform any validations once we retrieve the response + // the difference in how we do assertions here is needed because once the transport service sends back the response + // it decrements the reference to the FetchSearchResult (through the ActionListener#respondAndRelease) and sets hits to null + PlainActionFuture<FetchSearchResult> fetchListener = new PlainActionFuture<>() { + @Override + public void onResponse(FetchSearchResult fetchSearchResult) { + assertNotNull(fetchSearchResult); + assertNotNull(fetchSearchResult.hits()); + + int totalHits = fetchSearchResult.hits().getHits().length; + assertEquals(globalTopKResults.size(), totalHits); + for (int i = 0; i < totalHits; i++) { + // rank and score are set by the SearchPhaseController#merge so no need to validate that here + SearchHit hit = fetchSearchResult.hits().getAt(i); + assertNotNull(hit.getFields().get(fetchFieldName)); + assertEquals(hit.getFields().get(fetchFieldName).getValue(), fetchFieldValue + "_" + hit.docId()); + } + super.onResponse(fetchSearchResult); + } + + @Override + public void onFailure(Exception e) { + super.onFailure(e); + throw new AssertionError("No failure should have been raised", e); + } + }; + service.executeFetchPhase(fetchRequest, searchTask, fetchListener); + fetchListener.get(); + } catch (Exception ex) { + if (queryResult != null) { + if (queryResult.hasReferences()) { + queryResult.decRef(); + } + service.freeReaderContext(queryResult.getContextId()); + } + if (rankResult != null && rankResult.hasReferences()) { + rankResult.decRef(); + } + throw ex; + } + } + + public void testRankFeaturePhaseUsingClient() { + final String indexName = "index"; + final String rankFeatureFieldName = "field"; + final String searchFieldName = "search_field"; + final String searchFieldValue = "some_value"; + final String fetchFieldName = "fetch_field"; + final String fetchFieldValue = "fetch_value"; + + final int minDocs = 4; + final int maxDocs = 10; + int numDocs = between(minDocs, maxDocs); + createIndex(indexName); + // index some documents + for (int i = 0; i < numDocs; i++) { + prepareIndex(indexName).setId(String.valueOf(i)) + .setSource( + rankFeatureFieldName, + "aardvark_" + i, + searchFieldName, + searchFieldValue, + fetchFieldName, + fetchFieldValue + "_" + i + ) + .get(); + } + indicesAdmin().prepareRefresh(indexName).get(); + + ElasticsearchAssertions.assertResponse( + client().prepareSearch(indexName) + .setSource( + new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) + .size(2) + .from(2) + .fetchField(fetchFieldName) + .rankBuilder( + // here we override both the coordinator-level and the shard-level contexts + new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + + // no need for more than one query + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( + int size, + int from, + Client client + ) { + return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener<float[]> scoreListener) { + float[] scores = new float[featureDocs.length]; + for (int i = 0; i < featureDocs.length; i++) { + scores[i] =
featureDocs[i].score; + } + scoreListener.onResponse(scores); + } + }; + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + @Override + public ScoreDoc[] rankQueryPhaseResults( + List<QuerySearchResult> querySearchResults, + SearchPhaseController.TopDocsStats topDocStats + ) { + List<RankDoc> rankDocs = new ArrayList<>(); + for (int i = 0; i < querySearchResults.size(); i++) { + QuerySearchResult querySearchResult = querySearchResults.get(i); + TestRankShardResult shardResult = (TestRankShardResult) querySearchResult + .getRankShardResult(); + for (RankDoc trd : shardResult.testRankDocs) { + trd.shardIndex = i; + rankDocs.add(trd); + } + } + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); + topDocStats.fetchHits = topResults.length; + return topResults; + } + }; + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List<Query> queries, int from) { + return new QueryPhaseRankShardContext(queries, from) { + + @Override + public int rankWindowSize() { + return DEFAULT_RANK_WINDOW_SIZE; + } + + @Override + public RankShardResult combineQueryPhaseResults(List<TopDocs> rankResults) { + // we know we have just 1 query, so return all the docs from it + return new TestRankShardResult( + Arrays.stream(rankResults.get(0).scoreDocs) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) + .limit(rankWindowSize()) + .toArray(RankDoc[]::new) + ); + } + }; + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { + @Override + public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { + RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; + for (int i = 0; i < hits.getHits().length; i++) { + SearchHit hit = hits.getHits()[i]; + rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); + rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); + rankFeatureDocs[i].score = randomFloat(); + rankFeatureDocs[i].rank = i + 1; + } + return new RankFeatureShardResult(rankFeatureDocs); + } + }; + } + } + ) + ), + (response) -> { + SearchHits hits = response.getHits(); + assertEquals(hits.getTotalHits().value, numDocs); + assertEquals(hits.getHits().length, 2); + int index = 0; + for (SearchHit hit : hits.getHits()) { + assertEquals(hit.getRank(), 3 + index); + assertTrue(hit.getScore() >= 0); + assertEquals(hit.getFields().get(fetchFieldName).getValue(), fetchFieldValue + "_" + hit.docId()); + index++; + } + } + ); + } + + public void testRankFeaturePhaseExceptionOnCoordinatingNode() { + final String indexName = "index"; + final String rankFeatureFieldName = "field"; + final String searchFieldName = "search_field"; + final String searchFieldValue = "some_value"; + final String fetchFieldName = "fetch_field"; + final String fetchFieldValue = "fetch_value"; + + final int minDocs = 3; + final int maxDocs = 10; + int numDocs = between(minDocs, maxDocs); + createIndex(indexName); + // index some documents + for (int i = 0; i < numDocs; i++) { + prepareIndex(indexName).setId(String.valueOf(i)) + .setSource( + rankFeatureFieldName, + "aardvark_" + i, + searchFieldName, + searchFieldValue, + fetchFieldName, + fetchFieldValue + "_" + i + ) + .get(); + } +
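+ // refresh so the documents indexed above are visible to the search below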
indicesAdmin().prepareRefresh(indexName).get(); + + expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch(indexName) + .setSource( + new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) + .size(2) + .from(2) + .fetchField(fetchFieldName) + .rankBuilder(new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + + // no need for more than one query + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( + int size, + int from, + Client client + ) { + return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener<float[]> scoreListener) { + throw new IllegalStateException("should have failed earlier"); + } + }; + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + @Override + public ScoreDoc[] rankQueryPhaseResults( + List<QuerySearchResult> querySearchResults, + SearchPhaseController.TopDocsStats topDocStats + ) { + throw new UnsupportedOperationException("simulated failure"); + } + }; + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List<Query> queries, int from) { + return new QueryPhaseRankShardContext(queries, from) { + + @Override + public int rankWindowSize() { + return DEFAULT_RANK_WINDOW_SIZE; + } + + @Override + public RankShardResult combineQueryPhaseResults(List<TopDocs> rankResults) { + // we know we have just 1 query, so return all the docs from it + return new TestRankShardResult( + Arrays.stream(rankResults.get(0).scoreDocs) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) + .limit(rankWindowSize()) + .toArray(RankDoc[]::new) + ); + } + }; + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { + @Override + public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { + RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; + for (int i = 0; i < hits.getHits().length; i++) { + SearchHit hit = hits.getHits()[i]; + rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); + rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); + rankFeatureDocs[i].score = randomFloat(); + rankFeatureDocs[i].rank = i + 1; + } + return new RankFeatureShardResult(rankFeatureDocs); + } + }; + } + }) + ) + .get() + ); + } + + public void testRankFeaturePhaseExceptionAllShardFail() { + final String indexName = "index"; + final String rankFeatureFieldName = "field"; + final String searchFieldName = "search_field"; + final String searchFieldValue = "some_value"; + final String fetchFieldName = "fetch_field"; + final String fetchFieldValue = "fetch_value"; + + final int minDocs = 3; + final int maxDocs = 10; + int numDocs = between(minDocs, maxDocs); + createIndex(indexName); + // index some documents + for (int i = 0; i < numDocs; i++) { + prepareIndex(indexName).setId(String.valueOf(i)) + .setSource( + rankFeatureFieldName, + "aardvark_" + i, + searchFieldName, + searchFieldValue, + fetchFieldName, + fetchFieldValue + "_" + i + ) + .get(); + } + indicesAdmin().prepareRefresh(indexName).get(); + + expectThrows( + SearchPhaseExecutionException.class, + () ->
client().prepareSearch(indexName) + .setAllowPartialSearchResults(true) + .setSource( + new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) + .fetchField(fetchFieldName) + .rankBuilder( + // here we override both the coordinator-level and the shard-level contexts + new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + + // no need for more than one query + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( + int size, + int from, + Client client + ) { + return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener<float[]> scoreListener) { + float[] scores = new float[featureDocs.length]; + for (int i = 0; i < featureDocs.length; i++) { + scores[i] = featureDocs[i].score; + } + scoreListener.onResponse(scores); + } + }; + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + @Override + public ScoreDoc[] rankQueryPhaseResults( + List<QuerySearchResult> querySearchResults, + SearchPhaseController.TopDocsStats topDocStats + ) { + List<RankDoc> rankDocs = new ArrayList<>(); + for (int i = 0; i < querySearchResults.size(); i++) { + QuerySearchResult querySearchResult = querySearchResults.get(i); + TestRankShardResult shardResult = (TestRankShardResult) querySearchResult + .getRankShardResult(); + for (RankDoc trd : shardResult.testRankDocs) { + trd.shardIndex = i; + rankDocs.add(trd); + } + } + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); + topDocStats.fetchHits = topResults.length; + return topResults; + } + }; + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List<Query> queries, int from) { + return new QueryPhaseRankShardContext(queries, from) { + + @Override + public int rankWindowSize() { + return DEFAULT_RANK_WINDOW_SIZE; + } + + @Override + public RankShardResult combineQueryPhaseResults(List<TopDocs> rankResults) { + // we know we have just 1 query, so return all the docs from it + return new TestRankShardResult( + Arrays.stream(rankResults.get(0).scoreDocs) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) + .limit(rankWindowSize()) + .toArray(RankDoc[]::new) + ); + } + }; + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { + @Override + public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { + throw new UnsupportedOperationException("simulated failure"); + } + }; + } + } + ) + ) + .get() + ); + } + + public void testRankFeaturePhaseExceptionOneShardFails() { + // if we have only one shard and it fails, it will fallback to context.onPhaseFailure which will eventually clean up all contexts. + // in this test we want to make sure that even if one shard (of many) fails during the RankFeaturePhase, the appropriate + // context will have been cleaned up.
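+ // to that end, the index gets two shards (see below) and only the shard with id 0 throws, so the other shard can still return hits.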
+ final String indexName = "index"; + final String rankFeatureFieldName = "field"; + final String searchFieldName = "search_field"; + final String searchFieldValue = "some_value"; + final String fetchFieldName = "fetch_field"; + final String fetchFieldValue = "fetch_value"; + + final int minDocs = 3; + final int maxDocs = 10; + int numDocs = between(minDocs, maxDocs); + createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2).build()); + // index some documents + for (int i = 0; i < numDocs; i++) { + prepareIndex(indexName).setId(String.valueOf(i)) + .setSource( + rankFeatureFieldName, + "aardvark_" + i, + searchFieldName, + searchFieldValue, + fetchFieldName, + fetchFieldValue + "_" + i + ) + .get(); + } + indicesAdmin().prepareRefresh(indexName).get(); + + assertResponse( + client().prepareSearch(indexName) + .setAllowPartialSearchResults(true) + .setSource( + new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) + .fetchField(fetchFieldName) + .rankBuilder( + // here we override only the shard-level contexts + new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + + // no need for more than one queries + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( + int size, + int from, + Client client + ) { + return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { + float[] scores = new float[featureDocs.length]; + for (int i = 0; i < featureDocs.length; i++) { + scores[i] = featureDocs[i].score; + } + scoreListener.onResponse(scores); + } + }; + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + @Override + public ScoreDoc[] rankQueryPhaseResults( + List querySearchResults, + SearchPhaseController.TopDocsStats topDocStats + ) { + List rankDocs = new ArrayList<>(); + for (int i = 0; i < querySearchResults.size(); i++) { + QuerySearchResult querySearchResult = querySearchResults.get(i); + TestRankShardResult shardResult = (TestRankShardResult) querySearchResult + .getRankShardResult(); + for (RankDoc trd : shardResult.testRankDocs) { + trd.shardIndex = i; + rankDocs.add(trd); + } + } + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); + topDocStats.fetchHits = topResults.length; + return topResults; + } + }; + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + return new QueryPhaseRankShardContext(queries, from) { + + @Override + public int rankWindowSize() { + return DEFAULT_RANK_WINDOW_SIZE; + } + + @Override + public RankShardResult combineQueryPhaseResults(List rankResults) { + // we know we have just 1 query, so return all the docs from it + return new TestRankShardResult( + Arrays.stream(rankResults.get(0).scoreDocs) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) + .limit(rankWindowSize()) + .toArray(RankDoc[]::new) + ); + } + }; + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { + @Override + public RankShardResult 
buildRankFeatureShardResult(SearchHits hits, int shardId) { + if (shardId == 0) { + throw new UnsupportedOperationException("simulated failure"); + } else { + RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; + for (int i = 0; i < hits.getHits().length; i++) { + SearchHit hit = hits.getHits()[i]; + rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); + rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); + rankFeatureDocs[i].score = randomFloat(); + rankFeatureDocs[i].rank = i + 1; + } + return new RankFeatureShardResult(rankFeatureDocs); + } + } + }; + } + } + ) + ), + (searchResponse) -> { + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals("simulated failure", searchResponse.getShardFailures()[0].getCause().getMessage()); + assertNotEquals(0, searchResponse.getHits().getHits().length); + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertEquals(fetchFieldValue + "_" + hit.getId(), hit.getFields().get(fetchFieldName).getValue()); + assertEquals(1, hit.getShard().getShardId().id()); + } + } + ); + } + + public void testSearchWhileIndexDeletedDoesNotLeakSearchContext() throws ExecutionException, InterruptedException { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + + MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class); + service.setOnPutContext(context -> { + if (context.indexShard() == indexShard) { + assertAcked(indicesAdmin().prepareDelete("index")); + } + }); + + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchRequest scrollSearchRequest = new SearchRequest().allowPartialSearchResults(true) + .scroll(new Scroll(TimeValue.timeValueMinutes(1))); + + // the scrolls are not explicitly freed, but should all be gone when the test finishes. + // for completeness, we also randomly test the regular search path. + final boolean useScroll = randomBoolean(); + PlainActionFuture<SearchPhaseResult> result = new PlainActionFuture<>(); + service.executeQueryPhase( + new ShardSearchRequest( + OriginalIndices.NONE, + useScroll ?
scrollSearchRequest : searchRequest, + new ShardId(resolveIndex("index"), 0), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ), + new SearchShardTask(123L, "", "", "", null, emptyMap()), + result + ); + + try { + result.get(); + } catch (Exception e) { + // ok + } + + expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().setIndices("index").get()); + + assertEquals(0, service.getActiveContexts()); + + SearchStats.Stats totalStats = indexShard.searchStats().getTotal(); + assertEquals(0, totalStats.getQueryCurrent()); + assertEquals(0, totalStats.getScrollCurrent()); + assertEquals(0, totalStats.getFetchCurrent()); + } + + public void testBeforeShardLockDuringShardCreate() { + IndexService indexService = createIndex("index", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertResponse( + client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), + searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue())) + ); + SearchService service = getInstanceFromNode(SearchService.class); + + assertEquals(1, service.getActiveContexts()); + service.beforeIndexShardCreated( + TestShardRouting.newShardRouting( + "test", + 0, + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomBoolean(), + ShardRoutingState.INITIALIZING + ), + indexService.getIndexSettings().getSettings() + ); + assertEquals(1, service.getActiveContexts()); + + service.beforeIndexShardCreated( + TestShardRouting.newShardRouting( + new ShardId(indexService.index(), 0), + randomAlphaOfLength(5), + randomBoolean(), + ShardRoutingState.INITIALIZING + ), + indexService.getIndexSettings().getSettings() + ); + assertEquals(0, service.getActiveContexts()); + } + + public void testTimeout() throws IOException { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + final ShardSearchRequest requestWithDefaultTimeout = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + + try ( + ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext contextWithDefaultTimeout = service.createContext( + reader, + requestWithDefaultTimeout, + mock(SearchShardTask.class), + ResultsType.NONE, + randomBoolean() + ) + ) { + // the search context should inherit the default timeout + assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5))); + } + + final long seconds = randomIntBetween(6, 10); + searchRequest.source(new SearchSourceBuilder().timeout(TimeValue.timeValueSeconds(seconds))); + final ShardSearchRequest requestWithCustomTimeout = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + try ( + ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext context = service.createContext( + reader, + requestWithCustomTimeout, + mock(SearchShardTask.class), + ResultsType.NONE, + randomBoolean() + ) + ) { + // the search context 
should inherit the query timeout + assertThat(context.timeout(), equalTo(TimeValue.timeValueSeconds(seconds))); + } + } + + /** + * test that getting more than the allowed number of docvalue_fields throws an exception + */ + public void testMaxDocvalueFieldsSearch() throws IOException { + final Settings settings = Settings.builder().put(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey(), 1).build(); + createIndex("index", settings, null, "field1", "keyword", "field2", "keyword"); + prepareIndex("index").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchRequest.source(searchSourceBuilder); + searchSourceBuilder.docValueField("field1"); + + final ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + try ( + ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) + ) { + assertNotNull(context); + } + + searchSourceBuilder.docValueField("unmapped_field"); + try ( + ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) + ) { + assertNotNull(context); + } + + searchSourceBuilder.docValueField("field2"); + try (ReaderContext reader = createReaderContext(indexService, indexShard)) { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) + ); + assertEquals( + "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [1] but was [2]. 
" + + "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.", + ex.getMessage() + ); + } + } + + public void testDeduplicateDocValuesFields() throws Exception { + createIndex("index", Settings.EMPTY, "_doc", "field1", "type=date", "field2", "type=date"); + prepareIndex("index").setId("1").setSource("field1", "2022-08-03", "field2", "2022-08-04").setRefreshPolicy(IMMEDIATE).get(); + SearchService service = getInstanceFromNode(SearchService.class); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + + try (ReaderContext reader = createReaderContext(indexService, indexShard)) { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchRequest.source(searchSourceBuilder); + searchSourceBuilder.docValueField("f*"); + if (randomBoolean()) { + searchSourceBuilder.docValueField("field*"); + } + if (randomBoolean()) { + searchSourceBuilder.docValueField("*2"); + } + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + try ( + SearchContext context = service.createContext( + reader, + request, + mock(SearchShardTask.class), + ResultsType.NONE, + randomBoolean() + ) + ) { + Collection fields = context.docValuesContext().fields(); + assertThat(fields, containsInAnyOrder(new FieldAndFormat("field1", null), new FieldAndFormat("field2", null))); + } + } + } + + /** + * test that getting more than the allowed number of script_fields throws an exception + */ + public void testMaxScriptFieldsSearch() throws IOException { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchRequest.source(searchSourceBuilder); + // adding the maximum allowed number of script_fields to retrieve + int maxScriptFields = indexService.getIndexSettings().getMaxScriptFields(); + for (int i = 0; i < maxScriptFields; i++) { + searchSourceBuilder.scriptField( + "field" + i, + new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) + ); + } + final ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + + try (ReaderContext reader = createReaderContext(indexService, indexShard)) { + try ( + SearchContext context = service.createContext( + reader, + request, + mock(SearchShardTask.class), + ResultsType.NONE, + randomBoolean() + ) + ) { + assertNotNull(context); + } + searchSourceBuilder.scriptField( + "anotherScriptField", + new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) + ); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) 
+
+    /**
+     * test that getting more than the allowed number of script_fields throws an exception
+     */
+    public void testMaxScriptFieldsSearch() throws IOException {
+        createIndex("index");
+        final SearchService service = getInstanceFromNode(SearchService.class);
+        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
+        final IndexShard indexShard = indexService.getShard(0);
+
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
+        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
+        searchRequest.source(searchSourceBuilder);
+        // adding the maximum allowed number of script_fields to retrieve
+        int maxScriptFields = indexService.getIndexSettings().getMaxScriptFields();
+        for (int i = 0; i < maxScriptFields; i++) {
+            searchSourceBuilder.scriptField(
+                "field" + i,
+                new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap())
+            );
+        }
+        final ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            searchRequest,
+            indexShard.shardId(),
+            0,
+            1,
+            AliasFilter.EMPTY,
+            1.0f,
+            -1,
+            null
+        );
+
+        try (ReaderContext reader = createReaderContext(indexService, indexShard)) {
+            try (
+                SearchContext context = service.createContext(
+                    reader,
+                    request,
+                    mock(SearchShardTask.class),
+                    ResultsType.NONE,
+                    randomBoolean()
+                )
+            ) {
+                assertNotNull(context);
+            }
+            searchSourceBuilder.scriptField(
+                "anotherScriptField",
+                new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap())
+            );
+            IllegalArgumentException ex = expectThrows(
+                IllegalArgumentException.class,
+                () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean())
+            );
+            assertEquals(
+                "Trying to retrieve too many script_fields. Must be less than or equal to: ["
+                    + maxScriptFields
+                    + "] but was ["
+                    + (maxScriptFields + 1)
+                    + "]. This limit can be set by changing the [index.max_script_fields] index level setting.",
+                ex.getMessage()
+            );
+        }
+    }
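+
+    // Illustrative sketch, not part of the original change: `index.max_script_fields` is a dynamic
+    // index-level setting, so a test needing more script fields could plausibly raise the ceiling
+    // first. The helper name below is an assumption, added purely for illustration.
+    private void raiseMaxScriptFields(String index, int limit) {
+        indicesAdmin().prepareUpdateSettings(index)
+            .setSettings(Settings.builder().put(IndexSettings.MAX_SCRIPT_FIELDS_SETTING.getKey(), limit))
+            .get();
+    }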
" + + "This limit can be set by changing the [search.max_open_scroll_context] setting.", + ex.getMessage() + ); + assertEquals(RestStatus.TOO_MANY_REQUESTS, ex.status()); + + service.freeAllScrollContexts(); + } + + public void testOpenScrollContextsConcurrently() throws Exception { + createIndex("index"); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + final int maxScrollContexts = SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY); + final SearchService searchService = getInstanceFromNode(SearchService.class); + Thread[] threads = new Thread[randomIntBetween(2, 8)]; + CountDownLatch latch = new CountDownLatch(threads.length); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + latch.countDown(); + try { + latch.await(); + for (;;) { + final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); + try { + final ShardScrollRequestTest request = new ShardScrollRequestTest(indexShard.shardId()); + searchService.createAndPutReaderContext( + request, + indexService, + indexShard, + reader, + SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() + ); + } catch (ElasticsearchException e) { + assertThat( + e.getMessage(), + equalTo( + "Trying to create too many scroll contexts. Must be less than or equal to: " + + "[" + + maxScrollContexts + + "]. " + + "This limit can be set by changing the [search.max_open_scroll_context] setting." + ) + ); + return; + } + } + } catch (Exception e) { + throw new AssertionError(e); + } + }); + threads[i].setName("elasticsearch[node_s_0][search]"); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + assertThat(searchService.getActiveContexts(), equalTo(maxScrollContexts)); + searchService.freeAllScrollContexts(); + } + + public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin { + @Override + public List> getQueries() { + return singletonList(new QuerySpec<>("fail_on_rewrite_query", FailOnRewriteQueryBuilder::new, parseContext -> { + throw new UnsupportedOperationException("No query parser for this plugin"); + })); + } + } + + public static class FailOnRewriteQueryBuilder extends DummyQueryBuilder { + + public FailOnRewriteQueryBuilder(StreamInput in) throws IOException { + super(in); + } + + public FailOnRewriteQueryBuilder() {} + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { + if (queryRewriteContext.convertToSearchExecutionContext() != null) { + throw new IllegalStateException("Fail on rewrite phase"); + } + return this; + } + } + + private static class ShardScrollRequestTest extends ShardSearchRequest { + private Scroll scroll; + + ShardScrollRequestTest(ShardId shardId) { + super( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(true), + shardId, + 0, + 1, + AliasFilter.EMPTY, + 1f, + -1, + null + ); + this.scroll = new Scroll(TimeValue.timeValueMinutes(1)); + } + + @Override + public Scroll scroll() { + return this.scroll; + } + } + + public void testCanMatch() throws Exception { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = 
+
+    public void testOpenScrollContextsConcurrently() throws Exception {
+        createIndex("index");
+        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
+        final IndexShard indexShard = indexService.getShard(0);
+
+        final int maxScrollContexts = SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY);
+        final SearchService searchService = getInstanceFromNode(SearchService.class);
+        Thread[] threads = new Thread[randomIntBetween(2, 8)];
+        CountDownLatch latch = new CountDownLatch(threads.length);
+        for (int i = 0; i < threads.length; i++) {
+            threads[i] = new Thread(() -> {
+                latch.countDown();
+                try {
+                    latch.await();
+                    for (;;) {
+                        final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier();
+                        try {
+                            final ShardScrollRequestTest request = new ShardScrollRequestTest(indexShard.shardId());
+                            searchService.createAndPutReaderContext(
+                                request,
+                                indexService,
+                                indexShard,
+                                reader,
+                                SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis()
+                            );
+                        } catch (ElasticsearchException e) {
+                            assertThat(
+                                e.getMessage(),
+                                equalTo(
+                                    "Trying to create too many scroll contexts. Must be less than or equal to: "
+                                        + "["
+                                        + maxScrollContexts
+                                        + "]. "
+                                        + "This limit can be set by changing the [search.max_open_scroll_context] setting."
+                                )
+                            );
+                            return;
+                        }
+                    }
+                } catch (Exception e) {
+                    throw new AssertionError(e);
+                }
+            });
+            threads[i].setName("elasticsearch[node_s_0][search]");
+            threads[i].start();
+        }
+        for (Thread thread : threads) {
+            thread.join();
+        }
+        assertThat(searchService.getActiveContexts(), equalTo(maxScrollContexts));
+        searchService.freeAllScrollContexts();
+    }
+
+    public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin {
+        @Override
+        public List<QuerySpec<?>> getQueries() {
+            return singletonList(new QuerySpec<>("fail_on_rewrite_query", FailOnRewriteQueryBuilder::new, parseContext -> {
+                throw new UnsupportedOperationException("No query parser for this plugin");
+            }));
+        }
+    }
+
+    public static class FailOnRewriteQueryBuilder extends DummyQueryBuilder {
+
+        public FailOnRewriteQueryBuilder(StreamInput in) throws IOException {
+            super(in);
+        }
+
+        public FailOnRewriteQueryBuilder() {}
+
+        @Override
+        protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) {
+            if (queryRewriteContext.convertToSearchExecutionContext() != null) {
+                throw new IllegalStateException("Fail on rewrite phase");
+            }
+            return this;
+        }
+    }
+
+    private static class ShardScrollRequestTest extends ShardSearchRequest {
+        private Scroll scroll;
+
+        ShardScrollRequestTest(ShardId shardId) {
+            super(
+                OriginalIndices.NONE,
+                new SearchRequest().allowPartialSearchResults(true),
+                shardId,
+                0,
+                1,
+                AliasFilter.EMPTY,
+                1f,
+                -1,
+                null
+            );
+            this.scroll = new Scroll(TimeValue.timeValueMinutes(1));
+        }
+
+        @Override
+        public Scroll scroll() {
+            return this.scroll;
+        }
+    }
+
+    public void testCanMatch() throws Exception {
+        createIndex("index");
+        final SearchService service = getInstanceFromNode(SearchService.class);
+        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
+        final IndexShard indexShard = indexService.getShard(0);
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
+        assertTrue(
+            service.canMatch(
+                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
+            ).canMatch()
+        );
+
+        searchRequest.source(new SearchSourceBuilder());
+        assertTrue(
+            service.canMatch(
+                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
+            ).canMatch()
+        );
+
+        searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()));
+        assertTrue(
+            service.canMatch(
+                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
+            ).canMatch()
+        );
+
+        searchRequest.source(
+            new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
+                .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(0))
+        );
+        assertTrue(
+            service.canMatch(
+                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
+            ).canMatch()
+        );
+        searchRequest.source(
+            new SearchSourceBuilder().query(new MatchNoneQueryBuilder()).aggregation(new GlobalAggregationBuilder("test"))
+        );
+        assertTrue(
+            service.canMatch(
+                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
+            ).canMatch()
+        );
+
+        searchRequest.source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder()));
+        assertFalse(
+            service.canMatch(
+                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
+            ).canMatch()
+        );
+        assertEquals(5, numWrapInvocations.get());
+
+        ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            searchRequest,
+            indexShard.shardId(),
+            0,
+            1,
+            AliasFilter.EMPTY,
+            1.0f,
+            -1,
+            null
+        );
+
+        /*
+         * Checks that canMatch takes into account the alias filter
+         */
+        // the source cannot be rewritten to a match_none
+        searchRequest.indices("alias").source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()));
+        assertFalse(
+            service.canMatch(
+                new ShardSearchRequest(
+                    OriginalIndices.NONE,
+                    searchRequest,
+                    indexShard.shardId(),
+                    0,
+                    1,
+                    AliasFilter.of(new TermQueryBuilder("foo", "bar"), "alias"),
+                    1f,
+                    -1,
+                    null
+                )
+            ).canMatch()
+        );
+        // the source can match and can be rewritten to a match_none, but not the alias filter
+        final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get();
+        assertEquals(RestStatus.CREATED, response.status());
+        searchRequest.indices("alias").source(new SearchSourceBuilder().query(new TermQueryBuilder("id", "1")));
+        assertFalse(
+            service.canMatch(
+                new ShardSearchRequest(
+                    OriginalIndices.NONE,
+                    searchRequest,
+                    indexShard.shardId(),
+                    0,
+                    1,
+                    AliasFilter.of(new TermQueryBuilder("foo", "bar"), "alias"),
+                    1f,
+                    -1,
+                    null
+                )
+            ).canMatch()
+        );
+
+        CountDownLatch latch = new CountDownLatch(1);
+        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
+        // Because the foo field used in alias filter is unmapped the term query builder rewrite can resolve to a match no docs query,
+        // without acquiring a searcher and that means the wrapper is not called
+        assertEquals(5, numWrapInvocations.get());
+        service.executeQueryPhase(request, task, new ActionListener<>() {
+            @Override
+            public void onResponse(SearchPhaseResult searchPhaseResult) {
+                try {
+                    // make sure that the wrapper is called when the query is actually executed
+                    assertEquals(6, numWrapInvocations.get());
+                } finally {
+                    latch.countDown();
+                }
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                try {
+                    throw new AssertionError(e);
+                } finally {
+                    latch.countDown();
+                }
+            }
+        });
+        latch.await();
+    }
+
+    public void testCanRewriteToMatchNone() {
+        assertFalse(
+            SearchService.canRewriteToMatchNone(
+                new SearchSourceBuilder().query(new MatchNoneQueryBuilder()).aggregation(new GlobalAggregationBuilder("test"))
+            )
+        );
+        assertFalse(SearchService.canRewriteToMatchNone(new SearchSourceBuilder()));
+        assertFalse(SearchService.canRewriteToMatchNone(null));
+        assertFalse(
+            SearchService.canRewriteToMatchNone(
+                new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
+                    .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(0))
+            )
+        );
+        assertTrue(SearchService.canRewriteToMatchNone(new SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar"))));
+        assertTrue(
+            SearchService.canRewriteToMatchNone(
+                new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
+                    .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(1))
+            )
+        );
+        assertFalse(
+            SearchService.canRewriteToMatchNone(
+                new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
+                    .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(1))
+                    .suggest(new SuggestBuilder())
+            )
+        );
+        assertFalse(
+            SearchService.canRewriteToMatchNone(
+                new SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar")).suggest(new SuggestBuilder())
+            )
+        );
+    }
+
+    public void testSetSearchThrottled() throws IOException {
+        createIndex("throttled_threadpool_index");
+        client().execute(
+            InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE,
+            new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(
+                "throttled_threadpool_index",
+                IndexSettings.INDEX_SEARCH_THROTTLED.getKey(),
+                "true"
+            )
+        ).actionGet();
+        final SearchService service = getInstanceFromNode(SearchService.class);
+        Index index = resolveIndex("throttled_threadpool_index");
+        assertTrue(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled());
+        prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
+        assertSearchHits(
+            client().prepareSearch("throttled_threadpool_index")
+                .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED)
+                .setSize(1),
+            "1"
+        );
+        // we add a search action listener in a plugin above to assert that this is actually used
+        client().execute(
+            InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE,
+            new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(
+                "throttled_threadpool_index",
+                IndexSettings.INDEX_SEARCH_THROTTLED.getKey(),
+                "false"
+            )
+        ).actionGet();
+
+        IllegalArgumentException iae = expectThrows(
+            IllegalArgumentException.class,
+            () -> indicesAdmin().prepareUpdateSettings("throttled_threadpool_index")
+                .setSettings(Settings.builder().put(IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), false))
+                .get()
+        );
+        assertEquals("can not update private setting [index.search.throttled]; this setting is managed by Elasticsearch", iae.getMessage());
+        assertFalse(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled());
+    }
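+
+    // Note on the test above: `index.search.throttled` is a private index setting, which is why the
+    // plain update-settings API rejects it and the test has to go through the test-only
+    // InternalOrPrivateSettingsPlugin action instead.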
+
+    public void testAggContextGetsMatchAll() throws IOException {
+        createIndex("test");
+        withAggregationContext("test", context -> assertThat(context.query(), equalTo(new MatchAllDocsQuery())));
+    }
+
+    public void testAggContextGetsNestedFilter() throws IOException {
+        XContentBuilder mapping = JsonXContent.contentBuilder().startObject().startObject("properties");
+        mapping.startObject("nested").field("type", "nested").endObject();
+        mapping.endObject().endObject();
+
+        createIndex("test", Settings.EMPTY, mapping);
+        withAggregationContext("test", context -> assertThat(context.query(), equalTo(new MatchAllDocsQuery())));
+    }
+
+    /**
+     * Build an {@link AggregationContext} with the named index.
+     */
+    private void withAggregationContext(String index, Consumer<AggregationContext> check) throws IOException {
+        IndexService indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(resolveIndex(index));
+        ShardId shardId = new ShardId(indexService.index(), 0);
+
+        SearchRequest request = new SearchRequest().indices(index)
+            .source(new SearchSourceBuilder().aggregation(new FiltersAggregationBuilder("test", new MatchAllQueryBuilder())))
+            .allowPartialSearchResults(false);
+        ShardSearchRequest shardRequest = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            request,
+            shardId,
+            0,
+            1,
+            AliasFilter.EMPTY,
+            1,
+            0,
+            null
+        );
+
+        try (ReaderContext readerContext = createReaderContext(indexService, indexService.getShard(0))) {
+            try (
+                SearchContext context = getInstanceFromNode(SearchService.class).createContext(
+                    readerContext,
+                    shardRequest,
+                    mock(SearchShardTask.class),
+                    ResultsType.QUERY,
+                    true
+                )
+            ) {
+                check.accept(context.aggregations().factories().context());
+            }
+        }
+    }
+
+    public void testExpandSearchThrottled() {
+        createIndex("throttled_threadpool_index");
+        client().execute(
+            InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE,
+            new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(
+                "throttled_threadpool_index",
+                IndexSettings.INDEX_SEARCH_THROTTLED.getKey(),
+                "true"
+            )
+        ).actionGet();
+
+        prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
+        assertHitCount(client().prepareSearch(), 1L);
+        assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED), 1L);
+    }
+
+    public void testExpandSearchFrozen() {
+        String indexName = "frozen_index";
+        createIndex(indexName);
+        client().execute(
+            InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE,
+            new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(indexName, "index.frozen", "true")
+        ).actionGet();
+
+        prepareIndex(indexName).setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
+        assertHitCount(client().prepareSearch(), 0L);
+        assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED), 1L);
+        assertWarnings(TransportSearchAction.FROZEN_INDICES_DEPRECATION_MESSAGE.replace("{}", indexName));
+    }
+
+    public void testCreateReduceContext() {
+        SearchService service = getInstanceFromNode(SearchService.class);
+        AggregationReduceContext.Builder reduceContextBuilder = service.aggReduceContextBuilder(
+            () -> false,
+            new SearchRequest().source(new SearchSourceBuilder()).source().aggregations()
+        );
+        {
+            AggregationReduceContext reduceContext = reduceContextBuilder.forFinalReduction();
+            expectThrows(
+                MultiBucketConsumerService.TooManyBucketsException.class,
+                () -> reduceContext.consumeBucketsAndMaybeBreak(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS + 1)
+            );
+        }
+        {
+            AggregationReduceContext reduceContext = reduceContextBuilder.forPartialReduction();
+            reduceContext.consumeBucketsAndMaybeBreak(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS + 1);
+        }
+    }
+
+    public void testMultiBucketConsumerServiceCB() {
+        MultiBucketConsumerService service = new MultiBucketConsumerService(
+            getInstanceFromNode(ClusterService.class),
+            Settings.EMPTY,
+            new NoopCircuitBreaker("test") {
+
+                @Override
+                public void addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException {
+                    throw new CircuitBreakingException("tripped", getDurability());
+                }
+            }
+        );
+        // for partial
+        {
+            IntConsumer consumer = service.createForPartial();
+            for (int i = 0; i < 1023; i++) {
+                consumer.accept(0);
+            }
+            CircuitBreakingException ex = expectThrows(CircuitBreakingException.class, () -> consumer.accept(0));
+            assertThat(ex.getMessage(), equalTo("tripped"));
+        }
+        // for final
+        {
+            IntConsumer consumer = service.createForFinal();
+            for (int i = 0; i < 1023; i++) {
+                consumer.accept(0);
+            }
+            CircuitBreakingException ex = expectThrows(CircuitBreakingException.class, () -> consumer.accept(0));
+            assertThat(ex.getMessage(), equalTo("tripped"));
+        }
+    }
+
+    public void testCreateSearchContext() throws IOException {
+        String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT);
+        IndexService indexService = createIndex(index);
+        final SearchService service = getInstanceFromNode(SearchService.class);
+        ShardId shardId = new ShardId(indexService.index(), 0);
+        long nowInMillis = System.currentTimeMillis();
+        String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10);
+        SearchRequest searchRequest = new SearchRequest();
+        searchRequest.allowPartialSearchResults(randomBoolean());
+        ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            searchRequest,
+            shardId,
+            0,
+            indexService.numberOfShards(),
+            AliasFilter.EMPTY,
+            1f,
+            nowInMillis,
+            clusterAlias
+        );
+        try (SearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) {
+            SearchShardTarget searchShardTarget = searchContext.shardTarget();
+            SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext();
+            String expectedIndexName = clusterAlias == null ? index : clusterAlias + ":" + index;
+            assertEquals(expectedIndexName, searchExecutionContext.getFullyQualifiedIndex().getName());
+            assertEquals(expectedIndexName, searchShardTarget.getFullyQualifiedIndexName());
+            assertEquals(clusterAlias, searchShardTarget.getClusterAlias());
+            assertEquals(shardId, searchShardTarget.getShardId());
+
+            assertNull(searchContext.dfsResult());
+            searchContext.addDfsResult();
+            assertSame(searchShardTarget, searchContext.dfsResult().getSearchShardTarget());
+
+            assertNull(searchContext.queryResult());
+            searchContext.addQueryResult();
+            assertSame(searchShardTarget, searchContext.queryResult().getSearchShardTarget());
+
+            assertNull(searchContext.fetchResult());
+            searchContext.addFetchResult();
+            assertSame(searchShardTarget, searchContext.fetchResult().getSearchShardTarget());
+        }
+    }
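+
+    // Note on the assertions above: for cross-cluster search, shards are addressed by a fully
+    // qualified index name of the form <cluster_alias>:<index>, which is why a non-null clusterAlias
+    // is expected to appear as a prefix in both the execution context and the shard target.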
+
+    /**
+     * While we have no NPE in DefaultContext constructor anymore, we still want to guard against it (or other failures) in the future to
+     * avoid leaking searchers.
+     */
+    public void testCreateSearchContextFailure() throws Exception {
+        final String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT);
+        final IndexService indexService = createIndex(index);
+        final SearchService service = getInstanceFromNode(SearchService.class);
+        final ShardId shardId = new ShardId(indexService.index(), 0);
+        final ShardSearchRequest request = new ShardSearchRequest(shardId, 0, null) {
+            @Override
+            public SearchType searchType() {
+                // induce an artificial NPE
+                throw new NullPointerException("expected");
+            }
+        };
+        try (ReaderContext reader = createReaderContext(indexService, indexService.getShard(shardId.id()))) {
+            NullPointerException e = expectThrows(
+                NullPointerException.class,
+                () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean())
+            );
+            assertEquals("expected", e.getMessage());
+        }
+        // Needs to busily assert because Engine#refreshNeeded can increase the refCount.
+        assertBusy(
+            () -> assertEquals("should have 2 store refs (IndexService + InternalEngine)", 2, indexService.getShard(0).store().refCount())
+        );
+    }
+
+    public void testMatchNoDocsEmptyResponse() throws InterruptedException {
+        createIndex("index");
+        Thread currentThread = Thread.currentThread();
+        SearchService service = getInstanceFromNode(SearchService.class);
+        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
+        IndexShard indexShard = indexService.getShard(0);
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().aggregation(AggregationBuilders.count("count").field("value")));
+        ShardSearchRequest shardRequest = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            searchRequest,
+            indexShard.shardId(),
+            0,
+            5,
+            AliasFilter.EMPTY,
+            1.0f,
+            0,
+            null
+        );
+        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
+
+        {
+            CountDownLatch latch = new CountDownLatch(1);
+            shardRequest.source().query(new MatchAllQueryBuilder());
+            service.executeQueryPhase(shardRequest, task, new ActionListener<>() {
+                @Override
+                public void onResponse(SearchPhaseResult result) {
+                    try {
+                        assertNotSame(Thread.currentThread(), currentThread);
+                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
+                        assertThat(result, instanceOf(QuerySearchResult.class));
+                        assertFalse(result.queryResult().isNull());
+                        assertNotNull(result.queryResult().topDocs());
+                        assertNotNull(result.queryResult().aggregations());
+                    } finally {
+                        latch.countDown();
+                    }
+                }
+
+                @Override
+                public void onFailure(Exception exc) {
+                    try {
+                        throw new AssertionError(exc);
+                    } finally {
+                        latch.countDown();
+                    }
+                }
+            });
+            latch.await();
+        }
+
+        {
+            CountDownLatch latch = new CountDownLatch(1);
+            shardRequest.source().query(new MatchNoneQueryBuilder());
+            service.executeQueryPhase(shardRequest, task, new ActionListener<>() {
+                @Override
+                public void onResponse(SearchPhaseResult result) {
+                    try {
+                        assertNotSame(Thread.currentThread(), currentThread);
+                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
+                        assertThat(result, instanceOf(QuerySearchResult.class));
+                        assertFalse(result.queryResult().isNull());
+                        assertNotNull(result.queryResult().topDocs());
+                        assertNotNull(result.queryResult().aggregations());
+                    } finally {
+                        latch.countDown();
+                    }
+                }
+
+                @Override
+                public void onFailure(Exception exc) {
+                    try {
+                        throw new AssertionError(exc);
+                    } finally {
+                        latch.countDown();
+                    }
+                }
+            });
+            latch.await();
+        }
+
+        {
+            CountDownLatch latch = new CountDownLatch(1);
+            shardRequest.canReturnNullResponseIfMatchNoDocs(true);
+            service.executeQueryPhase(shardRequest, task, new ActionListener<>() {
+                @Override
+                public void onResponse(SearchPhaseResult result) {
+                    try {
+                        // make sure we don't use the search threadpool
+                        assertSame(Thread.currentThread(), currentThread);
+                        assertThat(result, instanceOf(QuerySearchResult.class));
+                        assertTrue(result.queryResult().isNull());
+                    } finally {
+                        latch.countDown();
+                    }
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    try {
+                        throw new AssertionError(e);
+                    } finally {
+                        latch.countDown();
+                    }
+                }
+            });
+            latch.await();
+        }
+    }
+
+    public void testDeleteIndexWhileSearch() throws Exception {
+        createIndex("test");
+        int numDocs = randomIntBetween(1, 20);
+        for (int i = 0; i < numDocs; i++) {
+            prepareIndex("test").setSource("f", "v").get();
+        }
+        indicesAdmin().prepareRefresh("test").get();
+        AtomicBoolean stopped = new AtomicBoolean(false);
+        Thread[] searchers = new Thread[randomIntBetween(1, 4)];
+        CountDownLatch latch = new CountDownLatch(searchers.length);
+        for (int i = 0; i < searchers.length; i++) {
+            searchers[i] = new Thread(() -> {
+                latch.countDown();
+                while (stopped.get() == false) {
+                    try {
+                        client().prepareSearch("test").setRequestCache(false).get().decRef();
+                    } catch (Exception ignored) {
+                        return;
+                    }
+                }
+            });
+            searchers[i].start();
+        }
+        latch.await();
+        indicesAdmin().prepareDelete("test").get();
+        stopped.set(true);
+        for (Thread searcher : searchers) {
+            searcher.join();
+        }
+    }
+
+    public void testLookUpSearchContext() throws Exception {
+        createIndex("index");
+        SearchService searchService = getInstanceFromNode(SearchService.class);
+        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
+        IndexShard indexShard = indexService.getShard(0);
+        List<ShardSearchContextId> contextIds = new ArrayList<>();
+        int numContexts = randomIntBetween(1, 10);
+        CountDownLatch latch = new CountDownLatch(1);
+        indexShard.getThreadPool().executor(ThreadPool.Names.SEARCH).execute(() -> {
+            try {
+                for (int i = 0; i < numContexts; i++) {
+                    ShardSearchRequest request = new ShardSearchRequest(
+                        OriginalIndices.NONE,
+                        new SearchRequest().allowPartialSearchResults(true),
+                        indexShard.shardId(),
+                        0,
+                        1,
+                        AliasFilter.EMPTY,
+                        1.0f,
+                        -1,
+                        null
+                    );
+                    final ReaderContext context = searchService.createAndPutReaderContext(
+                        request,
+                        indexService,
+                        indexShard,
+                        indexShard.acquireSearcherSupplier(),
+                        SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis()
+                    );
+                    assertThat(context.id().getId(), equalTo((long) (i + 1)));
+                    contextIds.add(context.id());
+                }
+                assertThat(searchService.getActiveContexts(), equalTo(contextIds.size()));
+                while (contextIds.isEmpty() == false) {
+                    final ShardSearchContextId contextId = randomFrom(contextIds);
+                    assertFalse(searchService.freeReaderContext(new ShardSearchContextId(UUIDs.randomBase64UUID(), contextId.getId())));
+                    assertThat(searchService.getActiveContexts(), equalTo(contextIds.size()));
+                    if (randomBoolean()) {
+                        assertTrue(searchService.freeReaderContext(contextId));
+                    } else {
+                        assertTrue(
+                            searchService.freeReaderContext((new ShardSearchContextId(contextId.getSessionId(), contextId.getId())))
+                        );
+                    }
+                    contextIds.remove(contextId);
+                    assertThat(searchService.getActiveContexts(), equalTo(contextIds.size()));
+                    assertFalse(searchService.freeReaderContext(contextId));
+                    assertThat(searchService.getActiveContexts(), equalTo(contextIds.size()));
+                }
+            } finally {
+                latch.countDown();
+            }
+        });
+        latch.await();
+    }
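+
+    // Note on the assertions above: a reader context is freed by its numeric id, but only when the
+    // session id on the incoming ShardSearchContextId matches the one the context was created with;
+    // the UUIDs.randomBase64UUID() variant checks that a mismatched session is rejected.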
{ + fail("Search not cancelled early"); + } finally { + service.freeReaderContext(searchPhaseResult.getContextId()); + latch4.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + assertThat(e, is(instanceOf(TaskCancelledException.class))); + assertThat(e.getMessage(), is("task cancelled [simulated]")); + assertThat(((TaskCancelledException) e).status(), is(RestStatus.BAD_REQUEST)); + assertThat(searchContextCreated.get(), is(false)); + latch4.countDown(); + } + }); + latch4.await(); + } + + public void testCancelFetchPhaseEarly() throws Exception { + createIndex("index"); + final MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + + AtomicBoolean searchContextCreated = new AtomicBoolean(false); + service.setOnCreateSearchContext(c -> searchContextCreated.set(true)); + + // Test fetch phase is cancelled early + String scrollId; + var searchResponse = client().search(searchRequest.allowPartialSearchResults(false).scroll(TimeValue.timeValueMinutes(10))).get(); + try { + scrollId = searchResponse.getScrollId(); + } finally { + searchResponse.decRef(); + } + + client().searchScroll(new SearchScrollRequest(scrollId)).get().decRef(); + assertThat(searchContextCreated.get(), is(true)); + + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.addScrollId(scrollId); + client().clearScroll(clearScrollRequest); + + searchResponse = client().search(searchRequest.allowPartialSearchResults(false).scroll(TimeValue.timeValueMinutes(10))).get(); + try { + scrollId = searchResponse.getScrollId(); + } finally { + searchResponse.decRef(); + } + searchContextCreated.set(false); + service.setOnCheckCancelled(t -> { + SearchShardTask task = new SearchShardTask(randomLong(), "transport", "action", "", TaskId.EMPTY_TASK_ID, emptyMap()); + TaskCancelHelper.cancel(task, "simulated"); + return task; + }); + CountDownLatch latch = new CountDownLatch(1); + client().searchScroll(new SearchScrollRequest(scrollId), new ActionListener<>() { + @Override + public void onResponse(SearchResponse searchResponse) { + try { + fail("Search not cancelled early"); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + Throwable cancelledExc = e.getCause().getCause(); + assertThat(cancelledExc, is(instanceOf(TaskCancelledException.class))); + assertThat(cancelledExc.getMessage(), is("task cancelled [simulated]")); + assertThat(((TaskCancelledException) cancelledExc).status(), is(RestStatus.BAD_REQUEST)); + latch.countDown(); + } + }); + latch.await(); + assertThat(searchContextCreated.get(), is(false)); + + clearScrollRequest.setScrollIds(singletonList(scrollId)); + client().clearScroll(clearScrollRequest); + } + + public void testWaitOnRefresh() throws ExecutionException, InterruptedException { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30)); + searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 })); + + final DocWriteResponse response = 
+        final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get();
+        assertEquals(RestStatus.CREATED, response.status());
+
+        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
+        ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            searchRequest,
+            indexShard.shardId(),
+            0,
+            1,
+            AliasFilter.EMPTY,
+            1.0f,
+            -1,
+            null,
+            null,
+            null
+        );
+        PlainActionFuture<Void> future = new PlainActionFuture<>();
+        service.executeQueryPhase(request, task, future.delegateFailure((l, r) -> {
+            assertEquals(1, r.queryResult().getTotalHits().value);
+            l.onResponse(null);
+        }));
+        future.get();
+    }
+
+    public void testWaitOnRefreshFailsWithRefreshesDisabled() {
+        createIndex("index", Settings.builder().put("index.refresh_interval", "-1").build());
+        final SearchService service = getInstanceFromNode(SearchService.class);
+        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
+        final IndexShard indexShard = indexService.getShard(0);
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
+        searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30));
+        searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 }));
+
+        final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get();
+        assertEquals(RestStatus.CREATED, response.status());
+
+        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
+        PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>();
+        ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            searchRequest,
+            indexShard.shardId(),
+            0,
+            1,
+            AliasFilter.EMPTY,
+            1.0f,
+            -1,
+            null,
+            null,
+            null
+        );
+        service.executeQueryPhase(request, task, future);
+        IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, future::actionGet);
+        assertThat(
+            illegalArgumentException.getMessage(),
+            containsString("Cannot use wait_for_checkpoints with [index.refresh_interval=-1]")
+        );
+    }
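+
+    // Note on the wait_for_checkpoints tests around here: setWaitForCheckpoints maps an index name to
+    // one sequence-number checkpoint per shard, e.g. Collections.singletonMap("index", new long[] { 0 })
+    // holds the query phase until seq_no 0 is visible to search or the wait-for-checkpoints timeout fires.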
+
+    public void testWaitOnRefreshFailsIfCheckpointNotIndexed() {
+        createIndex("index");
+        final SearchService service = getInstanceFromNode(SearchService.class);
+        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
+        final IndexShard indexShard = indexService.getShard(0);
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
+        // Increased timeout to avoid cancelling the search task prior to its completion,
+        // as we expect to raise an Exception. Timeout itself is tested in the following `testWaitOnRefreshTimeout` test.
+        searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(200, 300)));
+        searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 1 }));
+
+        final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get();
+        assertEquals(RestStatus.CREATED, response.status());
+
+        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
+        PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>();
+        ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            searchRequest,
+            indexShard.shardId(),
+            0,
+            1,
+            AliasFilter.EMPTY,
+            1.0f,
+            -1,
+            null,
+            null,
+            null
+        );
+        service.executeQueryPhase(request, task, future);
+
+        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, future::actionGet);
+        assertThat(
+            ex.getMessage(),
+            containsString("Cannot wait for unissued seqNo checkpoint [wait_for_checkpoint=1, max_issued_seqNo=0]")
+        );
+    }
+
+    public void testWaitOnRefreshTimeout() {
+        createIndex("index", Settings.builder().put("index.refresh_interval", "60s").build());
+        final SearchService service = getInstanceFromNode(SearchService.class);
+        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
+        final IndexShard indexShard = indexService.getShard(0);
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
+        searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(10, 100)));
+        searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 }));
+
+        final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get();
+        assertEquals(RestStatus.CREATED, response.status());
+
+        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
+        PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>();
+        ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            searchRequest,
+            indexShard.shardId(),
+            0,
+            1,
+            AliasFilter.EMPTY,
+            1.0f,
+            -1,
+            null,
+            null,
+            null
+        );
+        service.executeQueryPhase(request, task, future);
+
+        SearchTimeoutException ex = expectThrows(SearchTimeoutException.class, future::actionGet);
+        assertThat(ex.getMessage(), containsString("Wait for seq_no [0] refreshed timed out ["));
+    }
+
+    public void testMinimalSearchSourceInShardRequests() {
+        createIndex("test");
+        int numDocs = between(0, 10);
+        for (int i = 0; i < numDocs; i++) {
+            prepareIndex("test").setSource("id", Integer.toString(i)).get();
+        }
+        indicesAdmin().prepareRefresh("test").get();
+
+        BytesReference pitId = client().execute(
+            TransportOpenPointInTimeAction.TYPE,
+            new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(10))
+        ).actionGet().getPointInTimeId();
+        final MockSearchService searchService = (MockSearchService) getInstanceFromNode(SearchService.class);
+        final List<ShardSearchRequest> shardRequests = new CopyOnWriteArrayList<>();
+        searchService.setOnCreateSearchContext(ctx -> shardRequests.add(ctx.request()));
+        try {
+            assertHitCount(
+                client().prepareSearch()
+                    .setSource(
+                        new SearchSourceBuilder().size(between(numDocs, numDocs * 2)).pointInTimeBuilder(new PointInTimeBuilder(pitId))
+                    ),
+                numDocs
+            );
+        } finally {
+            client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet();
+        }
+        assertThat(shardRequests, not(emptyList()));
+        for (ShardSearchRequest shardRequest : shardRequests) {
+            assertNotNull(shardRequest.source());
+            assertNotNull(shardRequest.source().pointInTimeBuilder());
+            assertThat(shardRequest.source().pointInTimeBuilder().getEncodedId(), equalTo(BytesArray.EMPTY));
+        }
+    }
+
+    public void testDfsQueryPhaseRewrite() {
+        createIndex("index");
+        prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
+        final SearchService service = getInstanceFromNode(SearchService.class);
+        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
+        final IndexShard indexShard = indexService.getShard(0);
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
+        searchRequest.source(SearchSourceBuilder.searchSource().query(new TestRewriteCounterQueryBuilder()));
+        ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            searchRequest,
+            indexShard.shardId(),
+            0,
+            1,
+            AliasFilter.EMPTY,
+            1.0f,
+            -1,
+            null
+        );
+        final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier();
+        ReaderContext context = service.createAndPutReaderContext(
+            request,
+            indexService,
+            indexShard,
+            reader,
+            SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis()
+        );
+        PlainActionFuture<QuerySearchResult> plainActionFuture = new PlainActionFuture<>();
+        service.executeQueryPhase(
+            new QuerySearchRequest(null, context.id(), request, new AggregatedDfs(Map.of(), Map.of(), 10)),
+            new SearchShardTask(42L, "", "", "", null, emptyMap()),
+            plainActionFuture
+        );
+
+        plainActionFuture.actionGet();
+        assertThat(((TestRewriteCounterQueryBuilder) request.source().query()).asyncRewriteCount, equalTo(1));
+        final ShardSearchContextId contextId = context.id();
+        assertTrue(service.freeReaderContext(contextId));
+    }
+
+    public void testEnableSearchWorkerThreads() throws IOException {
+        IndexService indexService = createIndex("index", Settings.EMPTY);
+        IndexShard indexShard = indexService.getShard(0);
+        ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            new SearchRequest().allowPartialSearchResults(randomBoolean()),
+            indexShard.shardId(),
+            0,
+            indexService.numberOfShards(),
+            AliasFilter.EMPTY,
+            1f,
+            System.currentTimeMillis(),
+            null
+        );
+        try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) {
+            SearchService service = getInstanceFromNode(SearchService.class);
+            SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap());
+
+            try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) {
+                assertNotNull(searchContext.searcher().getExecutor());
+            }
+
+            try {
+                ClusterUpdateSettingsResponse response = client().admin()
+                    .cluster()
+                    .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
+                    .setPersistentSettings(Settings.builder().put(SEARCH_WORKER_THREADS_ENABLED.getKey(), false).build())
+                    .get();
+                assertTrue(response.isAcknowledged());
+                try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) {
+                    assertNull(searchContext.searcher().getExecutor());
+                }
+            } finally {
+                // reset original default setting
+                client().admin()
+                    .cluster()
+                    .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
+                    .setPersistentSettings(Settings.builder().putNull(SEARCH_WORKER_THREADS_ENABLED.getKey()).build())
+                    .get();
+                try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) {
+                    assertNotNull(searchContext.searcher().getExecutor());
+                }
+            }
+        }
+    }
+
+    /**
+     * Verify that a single slice is created for requests that don't support parallel collection, while an executor is still
+     * provided to the searcher to parallelize other operations. Also ensure multiple slices are created for requests that do support
+     * parallel collection.
+     */
+    public void testSlicingBehaviourForParallelCollection() throws Exception {
+        IndexService indexService = createIndex("index", Settings.EMPTY);
+        ThreadPoolExecutor executor = (ThreadPoolExecutor) indexService.getThreadPool().executor(ThreadPool.Names.SEARCH);
+        final int configuredMaxPoolSize = 10;
+        executor.setMaximumPoolSize(configuredMaxPoolSize); // We set this explicitly to be independent of CPU cores.
+        int numDocs = randomIntBetween(50, 100);
+        for (int i = 0; i < numDocs; i++) {
+            prepareIndex("index").setId(String.valueOf(i)).setSource("field", "value").get();
+            if (i % 5 == 0) {
+                indicesAdmin().prepareRefresh("index").get();
+            }
+        }
+        final IndexShard indexShard = indexService.getShard(0);
+        ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            new SearchRequest().allowPartialSearchResults(randomBoolean()),
+            indexShard.shardId(),
+            0,
+            indexService.numberOfShards(),
+            AliasFilter.EMPTY,
+            1f,
+            System.currentTimeMillis(),
+            null
+        );
+        SearchService service = getInstanceFromNode(SearchService.class);
+        NonCountingTermQuery termQuery = new NonCountingTermQuery(new Term("field", "value"));
+        assertEquals(0, executor.getCompletedTaskCount());
+        try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) {
+            SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap());
+            {
+                try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, true)) {
+                    ContextIndexSearcher searcher = searchContext.searcher();
+                    assertNotNull(searcher.getExecutor());
+
+                    final int maxPoolSize = executor.getMaximumPoolSize();
+                    assertEquals(
+                        "Sanity check to ensure this isn't the default of 1 when pool size is unset",
+                        configuredMaxPoolSize,
+                        maxPoolSize
+                    );
+
+                    final int expectedSlices = ContextIndexSearcher.computeSlices(
+                        searcher.getIndexReader().leaves(),
+                        maxPoolSize,
+                        1
+                    ).length;
+                    assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices);
+
+                    final long priorExecutorTaskCount = executor.getCompletedTaskCount();
+                    searcher.search(termQuery, new TotalHitCountCollectorManager());
+                    assertBusy(
+                        () -> assertEquals(
+                            "DFS supports parallel collection, so the number of slices should be > 1.",
+                            expectedSlices - 1, // one slice executes on the calling thread
+                            executor.getCompletedTaskCount() - priorExecutorTaskCount
+                        )
+                    );
+                }
+            }
+            {
+                try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) {
+                    ContextIndexSearcher searcher = searchContext.searcher();
+                    assertNotNull(searcher.getExecutor());
+
+                    final int maxPoolSize = executor.getMaximumPoolSize();
+                    assertEquals(
+                        "Sanity check to ensure this isn't the default of 1 when pool size is unset",
+                        configuredMaxPoolSize,
+                        maxPoolSize
+                    );
+
+                    final int expectedSlices = ContextIndexSearcher.computeSlices(
+                        searcher.getIndexReader().leaves(),
+                        maxPoolSize,
+                        1
+                    ).length;
unset", 1, expectedSlices); + + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager()); + assertBusy( + () -> assertEquals( + "QUERY supports parallel collection when enabled, so the number of slices should be > 1.", + expectedSlices - 1, // one slice executes on the calling thread + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.FETCH, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertNull(searcher.getExecutor()); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager()); + assertBusy( + () -> assertEquals( + "The number of slices should be 1 as FETCH does not support parallel collection and thus runs on the calling" + + " thread.", + 0, + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.NONE, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertNull(searcher.getExecutor()); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager()); + assertBusy( + () -> assertEquals( + "The number of slices should be 1 as NONE does not support parallel collection.", + 0, // zero since one slice executes on the calling thread + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), false).build()) + .get(); + assertTrue(response.isAcknowledged()); + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertNull(searcher.getExecutor()); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager()); + assertBusy( + () -> assertEquals( + "The number of slices should be 1 when QUERY parallel collection is disabled.", + 0, // zero since one slice executes on the calling thread + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + } finally { + // Reset to the original default setting and check to ensure it takes effect. 
+                client().admin()
+                    .cluster()
+                    .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
+                    .setPersistentSettings(Settings.builder().putNull(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey()).build())
+                    .get();
+                {
+                    try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) {
+                        ContextIndexSearcher searcher = searchContext.searcher();
+                        assertNotNull(searcher.getExecutor());
+
+                        final int maxPoolSize = executor.getMaximumPoolSize();
+                        assertEquals(
+                            "Sanity check to ensure this isn't the default of 1 when pool size is unset",
+                            configuredMaxPoolSize,
+                            maxPoolSize
+                        );
+
+                        final int expectedSlices = ContextIndexSearcher.computeSlices(
+                            searcher.getIndexReader().leaves(),
+                            maxPoolSize,
+                            1
+                        ).length;
+                        assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices);
+
+                        final long priorExecutorTaskCount = executor.getCompletedTaskCount();
+                        searcher.search(termQuery, new TotalHitCountCollectorManager());
+                        assertBusy(
+                            () -> assertEquals(
+                                "QUERY supports parallel collection when enabled, so the number of slices should be > 1.",
+                                expectedSlices - 1, // one slice executes on the calling thread
+                                executor.getCompletedTaskCount() - priorExecutorTaskCount
+                            )
+                        );
+                    }
+                }
+            }
+        }
+    }
+
+    private static ReaderContext createReaderContext(IndexService indexService, IndexShard indexShard) {
+        return new ReaderContext(
+            new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()),
+            indexService,
+            indexShard,
+            indexShard.acquireSearcherSupplier(),
+            randomNonNegativeLong(),
+            false
+        );
+    }
+
+    private static class TestRewriteCounterQueryBuilder extends AbstractQueryBuilder<TestRewriteCounterQueryBuilder> {
+
+        final int asyncRewriteCount;
+        final Supplier<Boolean> fetched;
+
+        TestRewriteCounterQueryBuilder() {
+            asyncRewriteCount = 0;
+            fetched = null;
+        }
+
+        private TestRewriteCounterQueryBuilder(int asyncRewriteCount, Supplier<Boolean> fetched) {
+            this.asyncRewriteCount = asyncRewriteCount;
+            this.fetched = fetched;
+        }
+
+        @Override
+        public String getWriteableName() {
+            return "test_query";
+        }
+
+        @Override
+        public TransportVersion getMinimalSupportedVersion() {
+            return TransportVersions.ZERO;
+        }
+
+        @Override
+        protected void doWriteTo(StreamOutput out) throws IOException {}
+
+        @Override
+        protected void doXContent(XContentBuilder builder, Params params) throws IOException {}
+
+        @Override
+        protected Query doToQuery(SearchExecutionContext context) throws IOException {
+            return new MatchAllDocsQuery();
+        }
+
+        @Override
+        protected boolean doEquals(TestRewriteCounterQueryBuilder other) {
+            return true;
+        }
+
+        @Override
+        protected int doHashCode() {
+            return 42;
+        }
+
+        @Override
+        protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException {
+            if (asyncRewriteCount > 0) {
+                return this;
+            }
+            if (fetched != null) {
+                if (fetched.get() == null) {
+                    return this;
+                }
+                assert fetched.get();
+                return new TestRewriteCounterQueryBuilder(1, null);
+            }
+            if (queryRewriteContext.convertToDataRewriteContext() != null) {
+                SetOnce<Boolean> awaitingFetch = new SetOnce<>();
+                queryRewriteContext.registerAsyncAction((c, l) -> {
+                    awaitingFetch.set(true);
+                    l.onResponse(null);
+                });
+                return new TestRewriteCounterQueryBuilder(0, awaitingFetch::get);
+            }
+            return this;
+        }
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
index 642804730a144..31bcab31ca8a7 100644
--- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java
@@ -6,3121 +6,298 @@
  * your election, the "Elastic License 2.0", the "GNU Affero General Public
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
+
 package org.elasticsearch.search;

-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.FilterDirectoryReader;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.TotalHitCountCollectorManager;
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.util.SetOnce;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchTimeoutException;
-import org.elasticsearch.TransportVersion;
-import org.elasticsearch.TransportVersions;
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.DocWriteResponse;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.OriginalIndices;
-import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
-import org.elasticsearch.action.search.ClearScrollRequest;
-import org.elasticsearch.action.search.ClosePointInTimeRequest;
-import org.elasticsearch.action.search.OpenPointInTimeRequest;
-import org.elasticsearch.action.search.SearchPhaseController;
-import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.search.SearchScrollRequest;
-import org.elasticsearch.action.search.SearchShardTask;
-import org.elasticsearch.action.search.SearchType;
-import org.elasticsearch.action.search.TransportClosePointInTimeAction;
-import org.elasticsearch.action.search.TransportOpenPointInTimeAction;
-import org.elasticsearch.action.search.TransportSearchAction;
-import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.action.support.PlainActionFuture;
-import org.elasticsearch.action.support.WriteRequest;
-import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
-import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.ShardRoutingState;
-import org.elasticsearch.cluster.routing.TestShardRouting;
-import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.UUIDs;
-import org.elasticsearch.common.breaker.CircuitBreakingException;
-import org.elasticsearch.common.breaker.NoopCircuitBreaker;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.core.AbstractRefCounted;
-import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexModule;
-import org.elasticsearch.index.IndexNotFoundException;
org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.LeafFieldData; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.mapper.MapperMetrics; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.MetadataFieldMapper; +import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.mapper.RootObjectMapper; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.SearchOperationListener; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.settings.InternalOrPrivateSettingsPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.script.MockScriptEngine; -import org.elasticsearch.script.MockScriptPlugin; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.SearchService.ResultsType; -import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.aggregations.AggregationReduceContext; -import org.elasticsearch.search.aggregations.MultiBucketConsumerService; -import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.support.AggregationContext; -import org.elasticsearch.search.aggregations.support.ValueType; -import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.collapse.CollapseBuilder; -import org.elasticsearch.search.dfs.AggregatedDfs; -import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.fetch.ShardFetchRequest; -import org.elasticsearch.search.fetch.ShardFetchSearchRequest; -import org.elasticsearch.search.fetch.subphase.FieldAndFormat; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.ContextIndexSearcher; -import org.elasticsearch.search.internal.ReaderContext; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.ShardSearchContextId; import 
org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.search.query.NonCountingTermQuery; -import org.elasticsearch.search.query.QuerySearchRequest; -import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.rank.RankBuilder; -import org.elasticsearch.search.rank.RankDoc; -import org.elasticsearch.search.rank.RankShardResult; -import org.elasticsearch.search.rank.TestRankBuilder; -import org.elasticsearch.search.rank.TestRankShardResult; -import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; -import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; -import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; -import org.elasticsearch.search.rank.context.RankFeaturePhaseRankShardContext; -import org.elasticsearch.search.rank.feature.RankFeatureDoc; -import org.elasticsearch.search.rank.feature.RankFeatureResult; -import org.elasticsearch.search.rank.feature.RankFeatureShardRequest; -import org.elasticsearch.search.rank.feature.RankFeatureShardResult; -import org.elasticsearch.search.slice.SliceBuilder; -import org.elasticsearch.search.suggest.SuggestBuilder; -import org.elasticsearch.tasks.TaskCancelHelper; -import org.elasticsearch.tasks.TaskCancelledException; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.json.JsonXContent; -import org.junit.Before; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.MinAndMax; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; import java.util.Collections; -import java.util.Comparator; -import java.util.LinkedList; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.function.IntConsumer; -import java.util.function.Supplier; - -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonList; -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; -import static org.elasticsearch.search.SearchService.DEFAULT_SIZE; -import static org.elasticsearch.search.SearchService.QUERY_PHASE_PARALLEL_COLLECTION_ENABLED; -import static org.elasticsearch.search.SearchService.SEARCH_WORKER_THREADS_ENABLED; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; -import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.notNullValue;
-import static org.hamcrest.CoreMatchers.startsWith;
-import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.hamcrest.Matchers.not;
-import static org.mockito.Mockito.mock;
+import java.util.function.BiFunction;
+import java.util.function.Predicate;
 
-public class SearchServiceTests extends ESSingleNodeTestCase {
+public class SearchServiceTests extends IndexShardTestCase {
 
-    @Override
-    protected boolean resetNodeAfterTest() {
-        return true;
+    public void testCanMatchMatchAll() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()));
+        doTestCanMatch(searchRequest, null, true, null, false);
     }
 
-    @Override
-    protected Collection<Class<? extends Plugin>> getPlugins() {
-        return pluginList(
-            FailOnRewriteQueryPlugin.class,
-            CustomScriptPlugin.class,
-            ReaderWrapperCountPlugin.class,
-            InternalOrPrivateSettingsPlugin.class,
-            MockSearchService.TestPlugin.class
-        );
+    public void testCanMatchMatchNone() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder()));
+        doTestCanMatch(searchRequest, null, false, null, false);
     }
 
-    public static class ReaderWrapperCountPlugin extends Plugin {
-        @Override
-        public void onIndexModule(IndexModule indexModule) {
-            indexModule.setReaderWrapper(service -> SearchServiceTests::apply);
-        }
+    public void testCanMatchMatchNoneWithException() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder()));
+        doTestCanMatch(searchRequest, null, true, null, true);
     }
 
-    @Before
-    public void resetCount() {
-        numWrapInvocations = new AtomicInteger(0);
+    public void testCanMatchKeywordSortedQueryMatchNone() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().sort("field").query(new MatchNoneQueryBuilder()));
+        SortField sortField = new SortField("field", SortField.Type.STRING);
+        doTestCanMatch(searchRequest, sortField, false, null, false);
     }
 
-    private static AtomicInteger numWrapInvocations = new AtomicInteger(0);
-
-    private static DirectoryReader apply(DirectoryReader directoryReader) throws IOException {
-        numWrapInvocations.incrementAndGet();
-        return new FilterDirectoryReader(directoryReader, new FilterDirectoryReader.SubReaderWrapper() {
-            @Override
-            public LeafReader wrap(LeafReader reader) {
-                return reader;
-            }
-        }) {
-            @Override
-            protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
-                return in;
-            }
+    public void testCanMatchKeywordSortedQueryMatchAll() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().sort("field").query(new MatchAllQueryBuilder()));
+        SortField sortField = new SortField("field", SortField.Type.STRING);
+        MinAndMax<BytesRef> expectedMinAndMax = new MinAndMax<>(new BytesRef("value"), new BytesRef("value"));
+        doTestCanMatch(searchRequest, sortField, true, expectedMinAndMax, false);
+    }
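// Annotation (sketch, not part of the patch): what the expected MinAndMax above
// encodes. When the query is sorted on a keyword field, the shard-level can_match
// phase also reports the field's [min, max] terms, which the coordinator can use
// to order shards and, together with the query bounds, avoid querying shards that
// cannot contribute hits. With the single document indexed by doTestCanMatch
// (field == "value"), both bounds collapse to the same term:
MinAndMax<BytesRef> reported = new MinAndMax<>(new BytesRef("value"), new BytesRef("value"));
assert reported.getMin().compareTo(reported.getMax()) == 0; // one term, one bound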
+
+    public void testCanMatchKeywordSortedQueryMatchNoneWithException() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().sort("field").query(new MatchNoneQueryBuilder()));
+        // provide a sort field that throws exception
+        SortField sortField = new SortField("field", SortField.Type.STRING) {
             @Override
-            public CacheHelper getReaderCacheHelper() {
-                return directoryReader.getReaderCacheHelper();
+            public Type getType() {
+                throw new UnsupportedOperationException();
             }
         };
+        doTestCanMatch(searchRequest, sortField, false, null, false);
     }
 
-    public static class CustomScriptPlugin extends MockScriptPlugin {
-
-        static final String DUMMY_SCRIPT = "dummyScript";
-
-        @Override
-        protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
-            return Collections.singletonMap(DUMMY_SCRIPT, vars -> "dummy");
-        }
-
-        @Override
-        public void onIndexModule(IndexModule indexModule) {
-            indexModule.addSearchOperationListener(new SearchOperationListener() {
-                @Override
-                public void onFetchPhase(SearchContext context, long tookInNanos) {
-                    if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) {
-                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]"));
-                    } else {
-                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
-                    }
-                }
-
-                @Override
-                public void onQueryPhase(SearchContext context, long tookInNanos) {
-                    if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) {
-                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]"));
-                    } else {
-                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
-                    }
-                }
-            });
-        }
-    }
-
-    @Override
-    protected Settings nodeSettings() {
-        return Settings.builder().put("search.default_search_timeout", "5s").build();
-    }
-
-    public void testClearOnClose() {
-        createIndex("index");
-        prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-        assertResponse(
-            client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)),
-            searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue()))
-        );
-        SearchService service = getInstanceFromNode(SearchService.class);
-
-        assertEquals(1, service.getActiveContexts());
-        service.doClose(); // this kills the keep-alive reaper; we have to reset the node after this test
-        assertEquals(0, service.getActiveContexts());
-    }
-
-    public void testClearOnStop() {
-        createIndex("index");
-        prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-        assertResponse(
-            client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)),
-            searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue()))
-        );
-        SearchService service = getInstanceFromNode(SearchService.class);
-
-        assertEquals(1, service.getActiveContexts());
-        service.doStop();
-        assertEquals(0, service.getActiveContexts());
-    }
-
-    public void testClearIndexDelete() {
-        createIndex("index");
-        prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-        assertResponse(
-            client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)),
-            searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue()))
-        );
-        SearchService service =
getInstanceFromNode(SearchService.class); - - assertEquals(1, service.getActiveContexts()); - assertAcked(indicesAdmin().prepareDelete("index")); - awaitIndexShardCloseAsyncTasks(); - assertEquals(0, service.getActiveContexts()); - } - - public void testCloseSearchContextOnRewriteException() { - // if refresh happens while checking the exception, the subsequent reference count might not match, so we switch it off - createIndex("index", Settings.builder().put("index.refresh_interval", -1).build()); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - - SearchService service = getInstanceFromNode(SearchService.class); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - IndexShard indexShard = indexService.getShard(0); - - final int activeContexts = service.getActiveContexts(); - final int activeRefs = indexShard.store().refCount(); - expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("index").setQuery(new FailOnRewriteQueryBuilder()).get() - ); - assertEquals(activeContexts, service.getActiveContexts()); - assertEquals(activeRefs, indexShard.store().refCount()); - } - - public void testSearchWhileIndexDeleted() throws InterruptedException { - createIndex("index"); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - - SearchService service = getInstanceFromNode(SearchService.class); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - IndexShard indexShard = indexService.getShard(0); - AtomicBoolean running = new AtomicBoolean(true); - CountDownLatch startGun = new CountDownLatch(1); - final int permitCount = 100; - Semaphore semaphore = new Semaphore(permitCount); - ShardRouting routing = TestShardRouting.newShardRouting( - indexShard.shardId(), - randomAlphaOfLength(5), - randomBoolean(), - ShardRoutingState.INITIALIZING - ); - final Thread thread = new Thread(() -> { - startGun.countDown(); - while (running.get()) { - if (randomBoolean()) { - service.afterIndexRemoved(indexService.index(), indexService.getIndexSettings(), DELETED); - } else { - service.beforeIndexShardCreated(routing, indexService.getIndexSettings().getSettings()); - } - if (randomBoolean()) { - // here we trigger some refreshes to ensure the IR go out of scope such that we hit ACE if we access a search - // context in a non-sane way. - try { - semaphore.acquire(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - prepareIndex("index").setSource("field", "value") - .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())) - .execute(ActionListener.running(semaphore::release)); - } - } - }); - thread.start(); - startGun.await(); - try { - final int rounds = scaledRandomIntBetween(100, 10000); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchRequest scrollSearchRequest = new SearchRequest().allowPartialSearchResults(true) - .scroll(new Scroll(TimeValue.timeValueMinutes(1))); - for (int i = 0; i < rounds; i++) { - try { - try { - PlainActionFuture result = new PlainActionFuture<>(); - final boolean useScroll = randomBoolean(); - service.executeQueryPhase( - new ShardSearchRequest( - OriginalIndices.NONE, - useScroll ? 
scrollSearchRequest : searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ), - new SearchShardTask(123L, "", "", "", null, emptyMap()), - result.delegateFailure((l, r) -> { - r.incRef(); - l.onResponse(r); - }) - ); - final SearchPhaseResult searchPhaseResult = result.get(); - try { - List intCursors = new ArrayList<>(1); - intCursors.add(0); - ShardFetchRequest req = new ShardFetchRequest( - searchPhaseResult.getContextId(), - intCursors, - null/* not a scroll */ - ); - PlainActionFuture listener = new PlainActionFuture<>(); - service.executeFetchPhase(req, new SearchShardTask(123L, "", "", "", null, emptyMap()), listener); - listener.get(); - if (useScroll) { - // have to free context since this test does not remove the index from IndicesService. - service.freeReaderContext(searchPhaseResult.getContextId()); - } - } finally { - searchPhaseResult.decRef(); - } - } catch (ExecutionException ex) { - assertThat(ex.getCause(), instanceOf(RuntimeException.class)); - throw ((RuntimeException) ex.getCause()); - } - } catch (AlreadyClosedException ex) { - throw ex; - } catch (IllegalStateException ex) { - assertEquals(AbstractRefCounted.ALREADY_CLOSED_MESSAGE, ex.getMessage()); - } catch (SearchContextMissingException ex) { - // that's fine - } + public void testCanMatchKeywordSortedQueryMatchAllWithException() throws IOException { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false) + .source(new SearchSourceBuilder().sort("field").query(new MatchAllQueryBuilder())); + // provide a sort field that throws exception + SortField sortField = new SortField("field", SortField.Type.STRING) { + @Override + public Type getType() { + throw new UnsupportedOperationException(); } - } finally { - running.set(false); - thread.join(); - semaphore.acquire(permitCount); - } - - assertEquals(0, service.getActiveContexts()); - - SearchStats.Stats totalStats = indexShard.searchStats().getTotal(); - assertEquals(0, totalStats.getQueryCurrent()); - assertEquals(0, totalStats.getScrollCurrent()); - assertEquals(0, totalStats.getFetchCurrent()); + }; + doTestCanMatch(searchRequest, sortField, true, null, false); } - public void testRankFeaturePhaseSearchPhases() throws InterruptedException, ExecutionException { - final String indexName = "index"; - final String rankFeatureFieldName = "field"; - final String searchFieldName = "search_field"; - final String searchFieldValue = "some_value"; - final String fetchFieldName = "fetch_field"; - final String fetchFieldValue = "fetch_value"; - - final int minDocs = 3; - final int maxDocs = 10; - int numDocs = between(minDocs, maxDocs); - createIndex(indexName); - // index some documents - for (int i = 0; i < numDocs; i++) { - prepareIndex(indexName).setId(String.valueOf(i)) - .setSource( - rankFeatureFieldName, - "aardvark_" + i, - searchFieldName, - searchFieldValue, - fetchFieldName, - fetchFieldValue + "_" + i - ) - .get(); - } - indicesAdmin().prepareRefresh(indexName).get(); - - final SearchService service = getInstanceFromNode(SearchService.class); - - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex(indexName)); - final IndexShard indexShard = indexService.getShard(0); - SearchShardTask searchTask = new SearchShardTask(123L, "", "", "", null, emptyMap()); - - // create a SearchRequest that will return all documents and defines a TestRankBuilder with shard-level only operations - 
SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true) - .source( - new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) - .size(DEFAULT_SIZE) - .fetchField(fetchFieldName) - .rankBuilder( - // here we override only the shard-level contexts - new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - @Override - public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { - return new QueryPhaseRankShardContext(queries, from) { - - @Override - public int rankWindowSize() { - return DEFAULT_RANK_WINDOW_SIZE; - } - - @Override - public RankShardResult combineQueryPhaseResults(List rankResults) { - // we know we have just 1 query, so return all the docs from it - return new TestRankShardResult( - Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) - .limit(rankWindowSize()) - .toArray(RankDoc[]::new) - ); - } - }; - } - - @Override - public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { - return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { - @Override - public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { - RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; - for (int i = 0; i < hits.getHits().length; i++) { - SearchHit hit = hits.getHits()[i]; - rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); - rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); - rankFeatureDocs[i].score = (numDocs - i) + randomFloat(); - rankFeatureDocs[i].rank = i + 1; - } - return new RankFeatureShardResult(rankFeatureDocs); - } - }; - } - } - ) - ); - - ShardSearchRequest request = new ShardSearchRequest( + private void doTestCanMatch( + SearchRequest searchRequest, + SortField sortField, + boolean expectedCanMatch, + MinAndMax expectedMinAndMax, + boolean throwException + ) throws IOException { + ShardSearchRequest shardRequest = new ShardSearchRequest( OriginalIndices.NONE, searchRequest, - indexShard.shardId(), + new ShardId("index", "index", 0), 0, - 1, + 5, AliasFilter.EMPTY, 1.0f, - -1, + 0, null ); - QuerySearchResult queryResult = null; - RankFeatureResult rankResult = null; + IndexFieldData indexFieldData = indexFieldData(sortField); + IndexShard indexShard = newShard(true); try { - // Execute the query phase and store the result in a SearchPhaseResult container using a PlainActionFuture - PlainActionFuture queryPhaseResults = new PlainActionFuture<>(); - service.executeQueryPhase(request, searchTask, queryPhaseResults); - queryResult = (QuerySearchResult) queryPhaseResults.get(); - - // these are the matched docs from the query phase - final RankDoc[] queryRankDocs = ((TestRankShardResult) queryResult.getRankShardResult()).testRankDocs; - - // assume that we have cut down to these from the coordinator node as the top-docs to run the rank feature phase upon - List topRankWindowSizeDocs = randomNonEmptySubsetOf(Arrays.stream(queryRankDocs).map(x -> x.doc).toList()); - - // now we create a RankFeatureShardRequest to extract feature info for the top-docs above - RankFeatureShardRequest rankFeatureShardRequest = new RankFeatureShardRequest( - OriginalIndices.NONE, - queryResult.getContextId(), // use the context from the query phase - request, - topRankWindowSizeDocs - ); - PlainActionFuture rankPhaseResults = new PlainActionFuture<>(); - service.executeRankFeaturePhase(rankFeatureShardRequest, searchTask, 
rankPhaseResults); - rankResult = rankPhaseResults.get(); - - assertNotNull(rankResult); - assertNotNull(rankResult.rankFeatureResult()); - RankFeatureShardResult rankFeatureShardResult = rankResult.rankFeatureResult().shardResult(); - assertNotNull(rankFeatureShardResult); - - List sortedRankWindowDocs = topRankWindowSizeDocs.stream().sorted().toList(); - assertEquals(sortedRankWindowDocs.size(), rankFeatureShardResult.rankFeatureDocs.length); - for (int i = 0; i < sortedRankWindowDocs.size(); i++) { - assertEquals((long) sortedRankWindowDocs.get(i), rankFeatureShardResult.rankFeatureDocs[i].doc); - assertEquals(rankFeatureShardResult.rankFeatureDocs[i].featureData, "aardvark_" + sortedRankWindowDocs.get(i)); - } - - List globalTopKResults = randomNonEmptySubsetOf( - Arrays.stream(rankFeatureShardResult.rankFeatureDocs).map(x -> x.doc).toList() - ); - - // finally let's create a fetch request to bring back fetch info for the top results - ShardFetchSearchRequest fetchRequest = new ShardFetchSearchRequest( - OriginalIndices.NONE, - rankResult.getContextId(), - request, - globalTopKResults, - null, - null, - rankResult.getRescoreDocIds(), - null - ); - - // execute fetch phase and perform any validations once we retrieve the response - // the difference in how we do assertions here is needed because once the transport service sends back the response - // it decrements the reference to the FetchSearchResult (through the ActionListener#respondAndRelease) and sets hits to null - PlainActionFuture fetchListener = new PlainActionFuture<>() { - @Override - public void onResponse(FetchSearchResult fetchSearchResult) { - assertNotNull(fetchSearchResult); - assertNotNull(fetchSearchResult.hits()); - - int totalHits = fetchSearchResult.hits().getHits().length; - assertEquals(globalTopKResults.size(), totalHits); - for (int i = 0; i < totalHits; i++) { - // rank and score are set by the SearchPhaseController#merge so no need to validate that here - SearchHit hit = fetchSearchResult.hits().getAt(i); - assertNotNull(hit.getFields().get(fetchFieldName)); - assertEquals(hit.getFields().get(fetchFieldName).getValue(), fetchFieldValue + "_" + hit.docId()); - } - super.onResponse(fetchSearchResult); - } - - @Override - public void onFailure(Exception e) { - super.onFailure(e); - throw new AssertionError("No failure should have been raised", e); - } - }; - service.executeFetchPhase(fetchRequest, searchTask, fetchListener); - fetchListener.get(); - } catch (Exception ex) { - if (queryResult != null) { - if (queryResult.hasReferences()) { - queryResult.decRef(); - } - service.freeReaderContext(queryResult.getContextId()); - } - if (rankResult != null && rankResult.hasReferences()) { - rankResult.decRef(); - } - throw ex; - } - } - - public void testRankFeaturePhaseUsingClient() { - final String indexName = "index"; - final String rankFeatureFieldName = "field"; - final String searchFieldName = "search_field"; - final String searchFieldValue = "some_value"; - final String fetchFieldName = "fetch_field"; - final String fetchFieldValue = "fetch_value"; - - final int minDocs = 4; - final int maxDocs = 10; - int numDocs = between(minDocs, maxDocs); - createIndex(indexName); - // index some documents - for (int i = 0; i < numDocs; i++) { - prepareIndex(indexName).setId(String.valueOf(i)) - .setSource( - rankFeatureFieldName, - "aardvark_" + i, - searchFieldName, - searchFieldValue, - fetchFieldName, - fetchFieldValue + "_" + i - ) - .get(); - } - indicesAdmin().prepareRefresh(indexName).get(); - - 
ElasticsearchAssertions.assertResponse( - client().prepareSearch(indexName) - .setSource( - new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) - .size(2) - .from(2) - .fetchField(fetchFieldName) - .rankBuilder( - // here we override only the shard-level contexts - new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - - // no need for more than one queries - @Override - public boolean isCompoundBuilder() { - return false; - } - - @Override - public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( - int size, - int from, - Client client - ) { - return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { - @Override - protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { - float[] scores = new float[featureDocs.length]; - for (int i = 0; i < featureDocs.length; i++) { - scores[i] = featureDocs[i].score; - } - scoreListener.onResponse(scores); - } - }; - } - - @Override - public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { - return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - @Override - public ScoreDoc[] rankQueryPhaseResults( - List querySearchResults, - SearchPhaseController.TopDocsStats topDocStats - ) { - List rankDocs = new ArrayList<>(); - for (int i = 0; i < querySearchResults.size(); i++) { - QuerySearchResult querySearchResult = querySearchResults.get(i); - TestRankShardResult shardResult = (TestRankShardResult) querySearchResult - .getRankShardResult(); - for (RankDoc trd : shardResult.testRankDocs) { - trd.shardIndex = i; - rankDocs.add(trd); - } - } - rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); - RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); - topDocStats.fetchHits = topResults.length; - return topResults; - } - }; - } - - @Override - public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { - return new QueryPhaseRankShardContext(queries, from) { - - @Override - public int rankWindowSize() { - return DEFAULT_RANK_WINDOW_SIZE; - } - - @Override - public RankShardResult combineQueryPhaseResults(List rankResults) { - // we know we have just 1 query, so return all the docs from it - return new TestRankShardResult( - Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) - .limit(rankWindowSize()) - .toArray(RankDoc[]::new) - ); - } - }; - } - - @Override - public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { - return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { - @Override - public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { - RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; - for (int i = 0; i < hits.getHits().length; i++) { - SearchHit hit = hits.getHits()[i]; - rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); - rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); - rankFeatureDocs[i].score = randomFloat(); - rankFeatureDocs[i].rank = i + 1; - } - return new RankFeatureShardResult(rankFeatureDocs); - } - }; - } - } - ) - ), - (response) -> { - SearchHits hits = response.getHits(); - assertEquals(hits.getTotalHits().value, numDocs); - assertEquals(hits.getHits().length, 2); - int index = 0; - for (SearchHit hit : hits.getHits()) { - 
assertEquals(hit.getRank(), 3 + index); - assertTrue(hit.getScore() >= 0); - assertEquals(hit.getFields().get(fetchFieldName).getValue(), fetchFieldValue + "_" + hit.docId()); - index++; - } - } - ); - } - - public void testRankFeaturePhaseExceptionOnCoordinatingNode() { - final String indexName = "index"; - final String rankFeatureFieldName = "field"; - final String searchFieldName = "search_field"; - final String searchFieldValue = "some_value"; - final String fetchFieldName = "fetch_field"; - final String fetchFieldValue = "fetch_value"; - - final int minDocs = 3; - final int maxDocs = 10; - int numDocs = between(minDocs, maxDocs); - createIndex(indexName); - // index some documents - for (int i = 0; i < numDocs; i++) { - prepareIndex(indexName).setId(String.valueOf(i)) - .setSource( - rankFeatureFieldName, - "aardvark_" + i, - searchFieldName, - searchFieldValue, - fetchFieldName, - fetchFieldValue + "_" + i - ) - .get(); - } - indicesAdmin().prepareRefresh(indexName).get(); - - expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch(indexName) - .setSource( - new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) - .size(2) - .from(2) - .fetchField(fetchFieldName) - .rankBuilder(new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - - // no need for more than one queries - @Override - public boolean isCompoundBuilder() { - return false; - } - - @Override - public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( - int size, - int from, - Client client - ) { - return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { - @Override - protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { - throw new IllegalStateException("should have failed earlier"); - } - }; - } - - @Override - public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { - return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - @Override - public ScoreDoc[] rankQueryPhaseResults( - List querySearchResults, - SearchPhaseController.TopDocsStats topDocStats - ) { - throw new UnsupportedOperationException("simulated failure"); - } - }; - } - - @Override - public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { - return new QueryPhaseRankShardContext(queries, from) { - - @Override - public int rankWindowSize() { - return DEFAULT_RANK_WINDOW_SIZE; - } - - @Override - public RankShardResult combineQueryPhaseResults(List rankResults) { - // we know we have just 1 query, so return all the docs from it - return new TestRankShardResult( - Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) - .limit(rankWindowSize()) - .toArray(RankDoc[]::new) - ); - } - }; - } - - @Override - public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { - return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { - @Override - public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { - RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; - for (int i = 0; i < hits.getHits().length; i++) { - SearchHit hit = hits.getHits()[i]; - rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); - rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); - rankFeatureDocs[i].score = randomFloat(); - rankFeatureDocs[i].rank = i + 1; 
- } - return new RankFeatureShardResult(rankFeatureDocs); - } - }; - } - }) - ) - .get() - ); - } - - public void testRankFeaturePhaseExceptionAllShardFail() { - final String indexName = "index"; - final String rankFeatureFieldName = "field"; - final String searchFieldName = "search_field"; - final String searchFieldValue = "some_value"; - final String fetchFieldName = "fetch_field"; - final String fetchFieldValue = "fetch_value"; - - final int minDocs = 3; - final int maxDocs = 10; - int numDocs = between(minDocs, maxDocs); - createIndex(indexName); - // index some documents - for (int i = 0; i < numDocs; i++) { - prepareIndex(indexName).setId(String.valueOf(i)) - .setSource( - rankFeatureFieldName, - "aardvark_" + i, - searchFieldName, - searchFieldValue, - fetchFieldName, - fetchFieldValue + "_" + i - ) - .get(); - } - indicesAdmin().prepareRefresh(indexName).get(); - - expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch(indexName) - .setAllowPartialSearchResults(true) - .setSource( - new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) - .fetchField(fetchFieldName) - .rankBuilder( - // here we override only the shard-level contexts - new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - - // no need for more than one queries - @Override - public boolean isCompoundBuilder() { - return false; - } - - @Override - public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( - int size, - int from, - Client client - ) { - return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { - @Override - protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { - float[] scores = new float[featureDocs.length]; - for (int i = 0; i < featureDocs.length; i++) { - scores[i] = featureDocs[i].score; - } - scoreListener.onResponse(scores); - } - }; - } - - @Override - public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { - return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - @Override - public ScoreDoc[] rankQueryPhaseResults( - List querySearchResults, - SearchPhaseController.TopDocsStats topDocStats - ) { - List rankDocs = new ArrayList<>(); - for (int i = 0; i < querySearchResults.size(); i++) { - QuerySearchResult querySearchResult = querySearchResults.get(i); - TestRankShardResult shardResult = (TestRankShardResult) querySearchResult - .getRankShardResult(); - for (RankDoc trd : shardResult.testRankDocs) { - trd.shardIndex = i; - rankDocs.add(trd); - } - } - rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); - RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); - topDocStats.fetchHits = topResults.length; - return topResults; - } - }; - } - - @Override - public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { - return new QueryPhaseRankShardContext(queries, from) { - - @Override - public int rankWindowSize() { - return DEFAULT_RANK_WINDOW_SIZE; - } - - @Override - public RankShardResult combineQueryPhaseResults(List rankResults) { - // we know we have just 1 query, so return all the docs from it - return new TestRankShardResult( - Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) - .limit(rankWindowSize()) - .toArray(RankDoc[]::new) - ); - } - }; - } - - @Override - public RankFeaturePhaseRankShardContext 
buildRankFeaturePhaseShardContext() { - return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { - @Override - public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { - throw new UnsupportedOperationException("simulated failure"); - } - }; - } - } - ) - ) - .get() - ); - } - - public void testRankFeaturePhaseExceptionOneShardFails() { - // if we have only one shard and it fails, it will fallback to context.onPhaseFailure which will eventually clean up all contexts. - // in this test we want to make sure that even if one shard (of many) fails during the RankFeaturePhase, then the appropriate - // context will have been cleaned up. - final String indexName = "index"; - final String rankFeatureFieldName = "field"; - final String searchFieldName = "search_field"; - final String searchFieldValue = "some_value"; - final String fetchFieldName = "fetch_field"; - final String fetchFieldValue = "fetch_value"; - - final int minDocs = 3; - final int maxDocs = 10; - int numDocs = between(minDocs, maxDocs); - createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2).build()); - // index some documents - for (int i = 0; i < numDocs; i++) { - prepareIndex(indexName).setId(String.valueOf(i)) - .setSource( - rankFeatureFieldName, - "aardvark_" + i, - searchFieldName, - searchFieldValue, - fetchFieldName, - fetchFieldValue + "_" + i - ) - .get(); - } - indicesAdmin().prepareRefresh(indexName).get(); - - assertResponse( - client().prepareSearch(indexName) - .setAllowPartialSearchResults(true) - .setSource( - new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) - .fetchField(fetchFieldName) - .rankBuilder( - // here we override only the shard-level contexts - new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - - // no need for more than one queries - @Override - public boolean isCompoundBuilder() { - return false; - } - - @Override - public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( - int size, - int from, - Client client - ) { - return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { - @Override - protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { - float[] scores = new float[featureDocs.length]; - for (int i = 0; i < featureDocs.length; i++) { - scores[i] = featureDocs[i].score; - } - scoreListener.onResponse(scores); - } - }; - } - - @Override - public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { - return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - @Override - public ScoreDoc[] rankQueryPhaseResults( - List querySearchResults, - SearchPhaseController.TopDocsStats topDocStats - ) { - List rankDocs = new ArrayList<>(); - for (int i = 0; i < querySearchResults.size(); i++) { - QuerySearchResult querySearchResult = querySearchResults.get(i); - TestRankShardResult shardResult = (TestRankShardResult) querySearchResult - .getRankShardResult(); - for (RankDoc trd : shardResult.testRankDocs) { - trd.shardIndex = i; - rankDocs.add(trd); - } - } - rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); - RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); - topDocStats.fetchHits = topResults.length; - return topResults; - } - }; - } - - @Override - public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { - return new 
QueryPhaseRankShardContext(queries, from) { - - @Override - public int rankWindowSize() { - return DEFAULT_RANK_WINDOW_SIZE; - } - - @Override - public RankShardResult combineQueryPhaseResults(List rankResults) { - // we know we have just 1 query, so return all the docs from it - return new TestRankShardResult( - Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) - .limit(rankWindowSize()) - .toArray(RankDoc[]::new) - ); - } - }; - } - - @Override - public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { - return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { - @Override - public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { - if (shardId == 0) { - throw new UnsupportedOperationException("simulated failure"); - } else { - RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; - for (int i = 0; i < hits.getHits().length; i++) { - SearchHit hit = hits.getHits()[i]; - rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); - rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); - rankFeatureDocs[i].score = randomFloat(); - rankFeatureDocs[i].rank = i + 1; - } - return new RankFeatureShardResult(rankFeatureDocs); - } - } - }; - } - } - ) - ), - (searchResponse) -> { - assertEquals(1, searchResponse.getSuccessfulShards()); - assertEquals("simulated failure", searchResponse.getShardFailures()[0].getCause().getMessage()); - assertNotEquals(0, searchResponse.getHits().getHits().length); - for (SearchHit hit : searchResponse.getHits().getHits()) { - assertEquals(fetchFieldValue + "_" + hit.getId(), hit.getFields().get(fetchFieldName).getValue()); - assertEquals(1, hit.getShard().getShardId().id()); + recoverShardFromStore(indexShard); + assertTrue(indexDoc(indexShard, "_doc", "id", "{\"field\":\"value\"}").isCreated()); + assertTrue(indexShard.refresh("test").refreshed()); + try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) { + SearchExecutionContext searchExecutionContext = createSearchExecutionContext( + (mappedFieldType, fieldDataContext) -> indexFieldData, + searcher + ); + SearchService.CanMatchContext canMatchContext = createCanMatchContext( + shardRequest, + indexShard, + searchExecutionContext, + parserConfig(), + throwException + ); + CanMatchShardResponse canMatchShardResponse = SearchService.canMatch(canMatchContext, false); + assertEquals(expectedCanMatch, canMatchShardResponse.canMatch()); + if (expectedMinAndMax == null) { + assertNull(canMatchShardResponse.estimatedMinAndMax()); + } else { + MinAndMax minAndMax = canMatchShardResponse.estimatedMinAndMax(); + assertNotNull(minAndMax); + assertEquals(expectedMinAndMax.getMin(), minAndMax.getMin()); + assertEquals(expectedMinAndMax.getMin(), minAndMax.getMax()); } - } - ); - } - public void testSearchWhileIndexDeletedDoesNotLeakSearchContext() throws ExecutionException, InterruptedException { - createIndex("index"); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - IndexShard indexShard = indexService.getShard(0); - - MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class); - service.setOnPutContext(context -> { - if (context.indexShard() == indexShard) { - 
assertAcked(indicesAdmin().prepareDelete("index")); } - }); - - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchRequest scrollSearchRequest = new SearchRequest().allowPartialSearchResults(true) - .scroll(new Scroll(TimeValue.timeValueMinutes(1))); - - // the scrolls are not explicitly freed, but should all be gone when the test finished. - // for completeness, we also randomly test the regular search path. - final boolean useScroll = randomBoolean(); - PlainActionFuture result = new PlainActionFuture<>(); - service.executeQueryPhase( - new ShardSearchRequest( - OriginalIndices.NONE, - useScroll ? scrollSearchRequest : searchRequest, - new ShardId(resolveIndex("index"), 0), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ), - new SearchShardTask(123L, "", "", "", null, emptyMap()), - result - ); - - try { - result.get(); - } catch (Exception e) { - // ok + } finally { + closeShards(indexShard); } - - expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().setIndices("index").get()); - - assertEquals(0, service.getActiveContexts()); - - SearchStats.Stats totalStats = indexShard.searchStats().getTotal(); - assertEquals(0, totalStats.getQueryCurrent()); - assertEquals(0, totalStats.getScrollCurrent()); - assertEquals(0, totalStats.getFetchCurrent()); - } - - public void testBeforeShardLockDuringShardCreate() { - IndexService indexService = createIndex("index", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - assertResponse( - client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), - searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue())) - ); - SearchService service = getInstanceFromNode(SearchService.class); - - assertEquals(1, service.getActiveContexts()); - service.beforeIndexShardCreated( - TestShardRouting.newShardRouting( - "test", - 0, - randomAlphaOfLength(5), - randomAlphaOfLength(5), - randomBoolean(), - ShardRoutingState.INITIALIZING - ), - indexService.getIndexSettings().getSettings() - ); - assertEquals(1, service.getActiveContexts()); - - service.beforeIndexShardCreated( - TestShardRouting.newShardRouting( - new ShardId(indexService.index(), 0), - randomAlphaOfLength(5), - randomBoolean(), - ShardRoutingState.INITIALIZING - ), - indexService.getIndexSettings().getSettings() - ); - assertEquals(0, service.getActiveContexts()); } - public void testTimeout() throws IOException { - createIndex("index"); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - final ShardSearchRequest requestWithDefaultTimeout = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), + private SearchExecutionContext createSearchExecutionContext( + BiFunction> indexFieldDataLookup, + IndexSearcher searcher + ) { + IndexMetadata indexMetadata = IndexMetadata.builder("index") + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, 
Settings.EMPTY); + Predicate indexNameMatcher = pattern -> Regex.simpleMatch(pattern, "index"); + + MapperBuilderContext root = MapperBuilderContext.root(false, false); + RootObjectMapper.Builder builder = new RootObjectMapper.Builder("_doc", ObjectMapper.Defaults.SUBOBJECTS); + Mapping mapping = new Mapping( + builder.build(MapperBuilderContext.root(false, false)), + new MetadataFieldMapper[0], + Collections.emptyMap() + ); + KeywordFieldMapper keywordFieldMapper = new KeywordFieldMapper.Builder("field", IndexVersion.current()).build(root); + MappingLookup mappingLookup = MappingLookup.fromMappers( + mapping, + Collections.singletonList(keywordFieldMapper), + Collections.emptyList() + ); + return new SearchExecutionContext( 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - - try ( - ReaderContext reader = createReaderContext(indexService, indexShard); - SearchContext contextWithDefaultTimeout = service.createContext( - reader, - requestWithDefaultTimeout, - mock(SearchShardTask.class), - ResultsType.NONE, - randomBoolean() - ) - ) { - // the search context should inherit the default timeout - assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5))); - } - - final long seconds = randomIntBetween(6, 10); - searchRequest.source(new SearchSourceBuilder().timeout(TimeValue.timeValueSeconds(seconds))); - final ShardSearchRequest requestWithCustomTimeout = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null + indexSettings, + null, + indexFieldDataLookup, + null, + mappingLookup, + null, + null, + parserConfig(), + writableRegistry(), + null, + searcher, + System::currentTimeMillis, + null, + indexNameMatcher, + () -> true, + null, + Collections.emptyMap(), + MapperMetrics.NOOP ); - try ( - ReaderContext reader = createReaderContext(indexService, indexShard); - SearchContext context = service.createContext( - reader, - requestWithCustomTimeout, - mock(SearchShardTask.class), - ResultsType.NONE, - randomBoolean() - ) - ) { - // the search context should inherit the query timeout - assertThat(context.timeout(), equalTo(TimeValue.timeValueSeconds(seconds))); - } } - /** - * test that getting more than the allowed number of docvalue_fields throws an exception - */ - public void testMaxDocvalueFieldsSearch() throws IOException { - final Settings settings = Settings.builder().put(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey(), 1).build(); - createIndex("index", settings, null, "field1", "keyword", "field2", "keyword"); - prepareIndex("index").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); - - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchRequest.source(searchSourceBuilder); - searchSourceBuilder.docValueField("field1"); - - final ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - try ( - ReaderContext reader = createReaderContext(indexService, indexShard); - SearchContext context 
= service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ) { - assertNotNull(context); - } - - searchSourceBuilder.docValueField("unmapped_field"); - try ( - ReaderContext reader = createReaderContext(indexService, indexShard); - SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ) { - assertNotNull(context); - } - - searchSourceBuilder.docValueField("field2"); - try (ReaderContext reader = createReaderContext(indexService, indexShard)) { - IllegalArgumentException ex = expectThrows( - IllegalArgumentException.class, - () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ); - assertEquals( - "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [1] but was [2]. " - + "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.", - ex.getMessage() - ); - } - } + private static IndexFieldData indexFieldData(SortField sortField) { + return new IndexFieldData<>() { + @Override + public String getFieldName() { + return "field"; + } - public void testDeduplicateDocValuesFields() throws Exception { - createIndex("index", Settings.EMPTY, "_doc", "field1", "type=date", "field2", "type=date"); - prepareIndex("index").setId("1").setSource("field1", "2022-08-03", "field2", "2022-08-04").setRefreshPolicy(IMMEDIATE).get(); - SearchService service = getInstanceFromNode(SearchService.class); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - IndexShard indexShard = indexService.getShard(0); + @Override + public ValuesSourceType getValuesSourceType() { + throw new UnsupportedOperationException(); + } - try (ReaderContext reader = createReaderContext(indexService, indexShard)) { - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchRequest.source(searchSourceBuilder); - searchSourceBuilder.docValueField("f*"); - if (randomBoolean()) { - searchSourceBuilder.docValueField("field*"); + @Override + public LeafFieldData load(LeafReaderContext context) { + throw new UnsupportedOperationException(); } - if (randomBoolean()) { - searchSourceBuilder.docValueField("*2"); + + @Override + public LeafFieldData loadDirect(LeafReaderContext context) { + throw new UnsupportedOperationException(); } - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - try ( - SearchContext context = service.createContext( - reader, - request, - mock(SearchShardTask.class), - ResultsType.NONE, - randomBoolean() - ) + + @Override + public SortField sortField( + Object missingValue, + MultiValueMode sortMode, + XFieldComparatorSource.Nested nested, + boolean reverse ) { - Collection fields = context.docValuesContext().fields(); - assertThat(fields, containsInAnyOrder(new FieldAndFormat("field1", null), new FieldAndFormat("field2", null))); + return sortField; } - } - } - - /** - * test that getting more than the allowed number of script_fields throws an exception - */ - public void testMaxScriptFieldsSearch() throws IOException { - createIndex("index"); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService 
indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchRequest.source(searchSourceBuilder); - // adding the maximum allowed number of script_fields to retrieve - int maxScriptFields = indexService.getIndexSettings().getMaxScriptFields(); - for (int i = 0; i < maxScriptFields; i++) { - searchSourceBuilder.scriptField( - "field" + i, - new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) - ); - } - final ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - try (ReaderContext reader = createReaderContext(indexService, indexShard)) { - try ( - SearchContext context = service.createContext( - reader, - request, - mock(SearchShardTask.class), - ResultsType.NONE, - randomBoolean() - ) + @Override + public BucketedSort newBucketedSort( + BigArrays bigArrays, + Object missingValue, + MultiValueMode sortMode, + XFieldComparatorSource.Nested nested, + SortOrder sortOrder, + DocValueFormat format, + int bucketSize, + BucketedSort.ExtraData extra ) { - assertNotNull(context); + throw new UnsupportedOperationException(); } - searchSourceBuilder.scriptField( - "anotherScriptField", - new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) - ); - IllegalArgumentException ex = expectThrows( - IllegalArgumentException.class, - () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ); - assertEquals( - "Trying to retrieve too many script_fields. Must be less than or equal to: [" - + maxScriptFields - + "] but was [" - + (maxScriptFields + 1) - + "]. 
This limit can be set by changing the [index.max_script_fields] index level setting.", - ex.getMessage() - ); - } - } - - public void testIgnoreScriptfieldIfSizeZero() throws IOException { - createIndex("index"); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchRequest.source(searchSourceBuilder); - searchSourceBuilder.scriptField( - "field" + 0, - new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) - ); - searchSourceBuilder.size(0); - final ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - try ( - ReaderContext reader = createReaderContext(indexService, indexShard); - SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ) { - assertEquals(0, context.scriptFields().fields().size()); - } + }; } - /** - * test that creating more than the allowed number of scroll contexts throws an exception - */ - public void testMaxOpenScrollContexts() throws Exception { - createIndex("index"); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - - // Open all possible scrolls, clear some of them, then open more until the limit is reached - LinkedList clearScrollIds = new LinkedList<>(); - - for (int i = 0; i < SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY); i++) { - assertResponse(client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), searchResponse -> { - if (randomInt(4) == 0) clearScrollIds.addLast(searchResponse.getScrollId()); - }); - } + private static SearchService.CanMatchContext createCanMatchContext( + ShardSearchRequest shardRequest, + IndexShard indexShard, + SearchExecutionContext searchExecutionContext, + XContentParserConfiguration parserConfig, + boolean throwException + ) { + return new SearchService.CanMatchContext(shardRequest, null, null, -1, -1) { + @Override + IndexShard getShard() { + return indexShard; + } - ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); - clearScrollRequest.setScrollIds(clearScrollIds); - client().clearScroll(clearScrollRequest).get(); + @Override + QueryRewriteContext getQueryRewriteContext(IndexService indexService) { + if (throwException) { + throw new IllegalArgumentException(); + } + return new QueryRewriteContext(parserConfig, null, System::currentTimeMillis); + } - for (int i = 0; i < clearScrollIds.size(); i++) { - client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)).get().decRef(); - } + @Override + SearchExecutionContext getSearchExecutionContext(Engine.Searcher searcher) { + return searchExecutionContext; + } - final ShardScrollRequestTest request = new 
ShardScrollRequestTest(indexShard.shardId()); - ElasticsearchException ex = expectThrows( - ElasticsearchException.class, - () -> service.createAndPutReaderContext( - request, - indexService, - indexShard, - indexShard.acquireSearcherSupplier(), - SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() - ) - ); - assertEquals( - "Trying to create too many scroll contexts. Must be less than or equal to: [" - + SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY) - + "]. " - + "This limit can be set by changing the [search.max_open_scroll_context] setting.", - ex.getMessage() - ); - assertEquals(RestStatus.TOO_MANY_REQUESTS, ex.status()); - - service.freeAllScrollContexts(); - } - - public void testOpenScrollContextsConcurrently() throws Exception { - createIndex("index"); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - - final int maxScrollContexts = SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY); - final SearchService searchService = getInstanceFromNode(SearchService.class); - Thread[] threads = new Thread[randomIntBetween(2, 8)]; - CountDownLatch latch = new CountDownLatch(threads.length); - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - latch.countDown(); - try { - latch.await(); - for (;;) { - final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); - try { - final ShardScrollRequestTest request = new ShardScrollRequestTest(indexShard.shardId()); - searchService.createAndPutReaderContext( - request, - indexService, - indexShard, - reader, - SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() - ); - } catch (ElasticsearchException e) { - assertThat( - e.getMessage(), - equalTo( - "Trying to create too many scroll contexts. Must be less than or equal to: " - + "[" - + maxScrollContexts - + "]. " - + "This limit can be set by changing the [search.max_open_scroll_context] setting." 
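[editor's note] A minimal sketch of the limit these two scroll tests exercise, assuming a simple shared counter; the real SearchService throws an ElasticsearchException carrying RestStatus.TOO_MANY_REQUESTS, for which a plain IllegalStateException stands in here:

import java.util.concurrent.atomic.AtomicInteger;

class ScrollContextGuard {
    private final int maxOpenScrollContext;
    private final AtomicInteger open = new AtomicInteger();

    ScrollContextGuard(int maxOpenScrollContext) {
        this.maxOpenScrollContext = maxOpenScrollContext;
    }

    void openContext() {
        // Atomically reserve a slot; roll back and fail if over the limit. The
        // atomic reserve-then-check is what keeps the concurrent test honest.
        if (open.incrementAndGet() > maxOpenScrollContext) {
            open.decrementAndGet();
            throw new IllegalStateException(
                "Trying to create too many scroll contexts. Must be less than or equal to: ["
                    + maxOpenScrollContext
                    + "]. This limit can be set by changing the [search.max_open_scroll_context] setting."
            );
        }
    }

    void closeContext() {
        open.decrementAndGet();
    }
}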
- ) - ); - return; - } - } - } catch (Exception e) { - throw new AssertionError(e); - } - }); - threads[i].setName("elasticsearch[node_s_0][search]"); - threads[i].start(); - } - for (Thread thread : threads) { - thread.join(); - } - assertThat(searchService.getActiveContexts(), equalTo(maxScrollContexts)); - searchService.freeAllScrollContexts(); - } - - public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin { - @Override - public List> getQueries() { - return singletonList(new QuerySpec<>("fail_on_rewrite_query", FailOnRewriteQueryBuilder::new, parseContext -> { - throw new UnsupportedOperationException("No query parser for this plugin"); - })); - } - } - - public static class FailOnRewriteQueryBuilder extends DummyQueryBuilder { - - public FailOnRewriteQueryBuilder(StreamInput in) throws IOException { - super(in); - } - - public FailOnRewriteQueryBuilder() {} - - @Override - protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { - if (queryRewriteContext.convertToSearchExecutionContext() != null) { - throw new IllegalStateException("Fail on rewrite phase"); - } - return this; - } - } - - private static class ShardScrollRequestTest extends ShardSearchRequest { - private Scroll scroll; - - ShardScrollRequestTest(ShardId shardId) { - super( - OriginalIndices.NONE, - new SearchRequest().allowPartialSearchResults(true), - shardId, - 0, - 1, - AliasFilter.EMPTY, - 1f, - -1, - null - ); - this.scroll = new Scroll(TimeValue.timeValueMinutes(1)); - } - - @Override - public Scroll scroll() { - return this.scroll; - } - } - - public void testCanMatch() throws Exception { - createIndex("index"); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - assertTrue( - service.canMatch( - new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) - ).canMatch() - ); - - searchRequest.source(new SearchSourceBuilder()); - assertTrue( - service.canMatch( - new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) - ).canMatch() - ); - - searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder())); - assertTrue( - service.canMatch( - new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) - ).canMatch() - ); - - searchRequest.source( - new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) - .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(0)) - ); - assertTrue( - service.canMatch( - new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) - ).canMatch() - ); - searchRequest.source( - new SearchSourceBuilder().query(new MatchNoneQueryBuilder()).aggregation(new GlobalAggregationBuilder("test")) - ); - assertTrue( - service.canMatch( - new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) - ).canMatch() - ); - - searchRequest.source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder())); - assertFalse( - service.canMatch( - new 
ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) - ).canMatch() - ); - assertEquals(5, numWrapInvocations.get()); - - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - - /* - * Checks that canMatch takes into account the alias filter - */ - // the source cannot be rewritten to a match_none - searchRequest.indices("alias").source(new SearchSourceBuilder().query(new MatchAllQueryBuilder())); - assertFalse( - service.canMatch( - new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.of(new TermQueryBuilder("foo", "bar"), "alias"), - 1f, - -1, - null - ) - ).canMatch() - ); - // the source can match and can be rewritten to a match_none, but not the alias filter - final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); - assertEquals(RestStatus.CREATED, response.status()); - searchRequest.indices("alias").source(new SearchSourceBuilder().query(new TermQueryBuilder("id", "1"))); - assertFalse( - service.canMatch( - new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.of(new TermQueryBuilder("foo", "bar"), "alias"), - 1f, - -1, - null - ) - ).canMatch() - ); - - CountDownLatch latch = new CountDownLatch(1); - SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); - // Because the foo field used in alias filter is unmapped the term query builder rewrite can resolve to a match no docs query, - // without acquiring a searcher and that means the wrapper is not called - assertEquals(5, numWrapInvocations.get()); - service.executeQueryPhase(request, task, new ActionListener<>() { - @Override - public void onResponse(SearchPhaseResult searchPhaseResult) { - try { - // make sure that the wrapper is called when the query is actually executed - assertEquals(6, numWrapInvocations.get()); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } - } - }); - latch.await(); - } - - public void testCanRewriteToMatchNone() { - assertFalse( - SearchService.canRewriteToMatchNone( - new SearchSourceBuilder().query(new MatchNoneQueryBuilder()).aggregation(new GlobalAggregationBuilder("test")) - ) - ); - assertFalse(SearchService.canRewriteToMatchNone(new SearchSourceBuilder())); - assertFalse(SearchService.canRewriteToMatchNone(null)); - assertFalse( - SearchService.canRewriteToMatchNone( - new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) - .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(0)) - ) - ); - assertTrue(SearchService.canRewriteToMatchNone(new SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar")))); - assertTrue( - SearchService.canRewriteToMatchNone( - new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) - .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(1)) - ) - ); - assertFalse( - SearchService.canRewriteToMatchNone( - new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) - .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(1)) - .suggest(new SuggestBuilder()) - ) - ); - assertFalse( - SearchService.canRewriteToMatchNone( - new 
SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar")).suggest(new SuggestBuilder()) - ) - ); - } - - public void testSetSearchThrottled() throws IOException { - createIndex("throttled_threadpool_index"); - client().execute( - InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, - new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request( - "throttled_threadpool_index", - IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), - "true" - ) - ).actionGet(); - final SearchService service = getInstanceFromNode(SearchService.class); - Index index = resolveIndex("throttled_threadpool_index"); - assertTrue(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled()); - prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - assertSearchHits( - client().prepareSearch("throttled_threadpool_index") - .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) - .setSize(1), - "1" - ); - // we add a search action listener in a plugin above to assert that this is actually used - client().execute( - InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, - new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request( - "throttled_threadpool_index", - IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), - "false" - ) - ).actionGet(); - - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("throttled_threadpool_index") - .setSettings(Settings.builder().put(IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), false)) - .get() - ); - assertEquals("can not update private setting [index.search.throttled]; this setting is managed by Elasticsearch", iae.getMessage()); - assertFalse(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled()); - } - - public void testAggContextGetsMatchAll() throws IOException { - createIndex("test"); - withAggregationContext("test", context -> assertThat(context.query(), equalTo(new MatchAllDocsQuery()))); - } - - public void testAggContextGetsNestedFilter() throws IOException { - XContentBuilder mapping = JsonXContent.contentBuilder().startObject().startObject("properties"); - mapping.startObject("nested").field("type", "nested").endObject(); - mapping.endObject().endObject(); - - createIndex("test", Settings.EMPTY, mapping); - withAggregationContext("test", context -> assertThat(context.query(), equalTo(new MatchAllDocsQuery()))); - } - - /** - * Build an {@link AggregationContext} with the named index. 
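[editor's note] The canRewriteToMatchNone assertions above encode a shard-skipping rule that a compact sketch makes easier to see. Boolean flags stand in for the real query, aggregation and suggest builders; the method mirrors the asserted outcomes, not the real implementation:

class CanRewriteSketch {
    static boolean canRewriteToMatchNone(boolean hasQuery, boolean aggsBucketEmptyShards, boolean hasSuggest) {
        if (hasQuery == false) {
            return false; // a null or empty source always hits the shard
        }
        if (hasSuggest) {
            return false; // suggesters must still run on every shard
        }
        // global aggs and terms aggs with min_doc_count=0 emit buckets even on
        // shards with no matching documents, so those shards cannot be skipped
        return aggsBucketEmptyShards == false;
    }

    public static void main(String[] args) {
        System.out.println(canRewriteToMatchNone(true, false, false));  // true: plain term query
        System.out.println(canRewriteToMatchNone(true, true, false));   // false: global agg present
        System.out.println(canRewriteToMatchNone(false, false, false)); // false: empty source
    }
}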
- */ - private void withAggregationContext(String index, Consumer check) throws IOException { - IndexService indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(resolveIndex(index)); - ShardId shardId = new ShardId(indexService.index(), 0); - - SearchRequest request = new SearchRequest().indices(index) - .source(new SearchSourceBuilder().aggregation(new FiltersAggregationBuilder("test", new MatchAllQueryBuilder()))) - .allowPartialSearchResults(false); - ShardSearchRequest shardRequest = new ShardSearchRequest( - OriginalIndices.NONE, - request, - shardId, - 0, - 1, - AliasFilter.EMPTY, - 1, - 0, - null - ); - - try (ReaderContext readerContext = createReaderContext(indexService, indexService.getShard(0))) { - try ( - SearchContext context = getInstanceFromNode(SearchService.class).createContext( - readerContext, - shardRequest, - mock(SearchShardTask.class), - ResultsType.QUERY, - true - ) - ) { - check.accept(context.aggregations().factories().context()); - } - } - } - - public void testExpandSearchThrottled() { - createIndex("throttled_threadpool_index"); - client().execute( - InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, - new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request( - "throttled_threadpool_index", - IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), - "true" - ) - ).actionGet(); - - prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - assertHitCount(client().prepareSearch(), 1L); - assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED), 1L); - } - - public void testExpandSearchFrozen() { - String indexName = "frozen_index"; - createIndex(indexName); - client().execute( - InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, - new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(indexName, "index.frozen", "true") - ).actionGet(); - - prepareIndex(indexName).setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - assertHitCount(client().prepareSearch(), 0L); - assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED), 1L); - assertWarnings(TransportSearchAction.FROZEN_INDICES_DEPRECATION_MESSAGE.replace("{}", indexName)); - } - - public void testCreateReduceContext() { - SearchService service = getInstanceFromNode(SearchService.class); - AggregationReduceContext.Builder reduceContextBuilder = service.aggReduceContextBuilder( - () -> false, - new SearchRequest().source(new SearchSourceBuilder()).source().aggregations() - ); - { - AggregationReduceContext reduceContext = reduceContextBuilder.forFinalReduction(); - expectThrows( - MultiBucketConsumerService.TooManyBucketsException.class, - () -> reduceContext.consumeBucketsAndMaybeBreak(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS + 1) - ); - } - { - AggregationReduceContext reduceContext = reduceContextBuilder.forPartialReduction(); - reduceContext.consumeBucketsAndMaybeBreak(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS + 1); - } - } - - public void testMultiBucketConsumerServiceCB() { - MultiBucketConsumerService service = new MultiBucketConsumerService( - getInstanceFromNode(ClusterService.class), - Settings.EMPTY, - new NoopCircuitBreaker("test") { - - @Override - public void addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException { - throw new CircuitBreakingException("tripped", getDurability()); - } - } 
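[editor's note] The bucket-consumer test below loops exactly 1023 accepted calls before expecting the breaker to trip, which implies the breaker is consulted once every 1024 invocations. A hedged sketch of that cadence, with CircuitBreaker as a stand-in interface; the 1024 constant is inferred from the loop bounds, not shown in this diff:

import java.util.function.IntConsumer;

class BucketConsumerSketch {
    interface CircuitBreaker {
        void addEstimateBytesAndMaybeBreak(long bytes, String label);
    }

    static IntConsumer create(CircuitBreaker breaker) {
        return new IntConsumer() {
            private int callCount;

            @Override
            public void accept(int newBuckets) {
                // Only consult the breaker every 1024th invocation, which is why
                // the test can accept 1023 times and then trips on the 1024th.
                if (++callCount % 1024 == 0) {
                    breaker.addEstimateBytesAndMaybeBreak(0, "allocated_buckets");
                }
            }
        };
    }
}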
- ); - // for partial - { - IntConsumer consumer = service.createForPartial(); - for (int i = 0; i < 1023; i++) { - consumer.accept(0); - } - CircuitBreakingException ex = expectThrows(CircuitBreakingException.class, () -> consumer.accept(0)); - assertThat(ex.getMessage(), equalTo("tripped")); - } - // for final - { - IntConsumer consumer = service.createForFinal(); - for (int i = 0; i < 1023; i++) { - consumer.accept(0); - } - CircuitBreakingException ex = expectThrows(CircuitBreakingException.class, () -> consumer.accept(0)); - assertThat(ex.getMessage(), equalTo("tripped")); - } - } - - public void testCreateSearchContext() throws IOException { - String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); - IndexService indexService = createIndex(index); - final SearchService service = getInstanceFromNode(SearchService.class); - ShardId shardId = new ShardId(indexService.index(), 0); - long nowInMillis = System.currentTimeMillis(); - String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10); - SearchRequest searchRequest = new SearchRequest(); - searchRequest.allowPartialSearchResults(randomBoolean()); - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - shardId, - 0, - indexService.numberOfShards(), - AliasFilter.EMPTY, - 1f, - nowInMillis, - clusterAlias - ); - try (SearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) { - SearchShardTarget searchShardTarget = searchContext.shardTarget(); - SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext(); - String expectedIndexName = clusterAlias == null ? index : clusterAlias + ":" + index; - assertEquals(expectedIndexName, searchExecutionContext.getFullyQualifiedIndex().getName()); - assertEquals(expectedIndexName, searchShardTarget.getFullyQualifiedIndexName()); - assertEquals(clusterAlias, searchShardTarget.getClusterAlias()); - assertEquals(shardId, searchShardTarget.getShardId()); - - assertNull(searchContext.dfsResult()); - searchContext.addDfsResult(); - assertSame(searchShardTarget, searchContext.dfsResult().getSearchShardTarget()); - - assertNull(searchContext.queryResult()); - searchContext.addQueryResult(); - assertSame(searchShardTarget, searchContext.queryResult().getSearchShardTarget()); - - assertNull(searchContext.fetchResult()); - searchContext.addFetchResult(); - assertSame(searchShardTarget, searchContext.fetchResult().getSearchShardTarget()); - } - } - - /** - * While we have no NPE in DefaultContext constructor anymore, we still want to guard against it (or other failures) in the future to - * avoid leaking searchers. 
- */ - public void testCreateSearchContextFailure() throws Exception { - final String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); - final IndexService indexService = createIndex(index); - final SearchService service = getInstanceFromNode(SearchService.class); - final ShardId shardId = new ShardId(indexService.index(), 0); - final ShardSearchRequest request = new ShardSearchRequest(shardId, 0, null) { @Override - public SearchType searchType() { - // induce an artificial NPE - throw new NullPointerException("expected"); + IndexService getIndexService() { + // it's ok to return null because the three above methods are overridden + return null; } }; - try (ReaderContext reader = createReaderContext(indexService, indexService.getShard(shardId.id()))) { - NullPointerException e = expectThrows( - NullPointerException.class, - () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ); - assertEquals("expected", e.getMessage()); - } - // Needs to busily assert because Engine#refreshNeeded can increase the refCount. - assertBusy( - () -> assertEquals("should have 2 store refs (IndexService + InternalEngine)", 2, indexService.getShard(0).store().refCount()) - ); - } - - public void testMatchNoDocsEmptyResponse() throws InterruptedException { - createIndex("index"); - Thread currentThread = Thread.currentThread(); - SearchService service = getInstanceFromNode(SearchService.class); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - IndexShard indexShard = indexService.getShard(0); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false) - .source(new SearchSourceBuilder().aggregation(AggregationBuilders.count("count").field("value"))); - ShardSearchRequest shardRequest = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 5, - AliasFilter.EMPTY, - 1.0f, - 0, - null - ); - SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); - - { - CountDownLatch latch = new CountDownLatch(1); - shardRequest.source().query(new MatchAllQueryBuilder()); - service.executeQueryPhase(shardRequest, task, new ActionListener<>() { - @Override - public void onResponse(SearchPhaseResult result) { - try { - assertNotSame(Thread.currentThread(), currentThread); - assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]")); - assertThat(result, instanceOf(QuerySearchResult.class)); - assertFalse(result.queryResult().isNull()); - assertNotNull(result.queryResult().topDocs()); - assertNotNull(result.queryResult().aggregations()); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception exc) { - try { - throw new AssertionError(exc); - } finally { - latch.countDown(); - } - } - }); - latch.await(); - } - - { - CountDownLatch latch = new CountDownLatch(1); - shardRequest.source().query(new MatchNoneQueryBuilder()); - service.executeQueryPhase(shardRequest, task, new ActionListener<>() { - @Override - public void onResponse(SearchPhaseResult result) { - try { - assertNotSame(Thread.currentThread(), currentThread); - assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]")); - assertThat(result, instanceOf(QuerySearchResult.class)); - assertFalse(result.queryResult().isNull()); - assertNotNull(result.queryResult().topDocs()); - 
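[editor's note] The failure test above asserts that store refCounts return to their baseline of 2 even when context creation throws, i.e. the acquired searcher must never leak. A minimal sketch of that guard pattern; AutoCloseable stands in for Engine.SearcherSupplier and createContext is a hypothetical failing step (the test induces an NPE there):

import java.util.concurrent.Callable;

class ContextCreationGuard {
    static <T> T createGuarded(AutoCloseable reader, Callable<T> createContext) throws Exception {
        boolean success = false;
        try {
            T context = createContext.call();
            success = true;
            return context;
        } finally {
            if (success == false) {
                reader.close(); // release the acquired searcher on any failure
            }
        }
    }
}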
assertNotNull(result.queryResult().aggregations()); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception exc) { - try { - throw new AssertionError(exc); - } finally { - latch.countDown(); - } - } - }); - latch.await(); - } - - { - CountDownLatch latch = new CountDownLatch(1); - shardRequest.canReturnNullResponseIfMatchNoDocs(true); - service.executeQueryPhase(shardRequest, task, new ActionListener<>() { - @Override - public void onResponse(SearchPhaseResult result) { - try { - // make sure we don't use the search threadpool - assertSame(Thread.currentThread(), currentThread); - assertThat(result, instanceOf(QuerySearchResult.class)); - assertTrue(result.queryResult().isNull()); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } - } - }); - latch.await(); - } - } - - public void testDeleteIndexWhileSearch() throws Exception { - createIndex("test"); - int numDocs = randomIntBetween(1, 20); - for (int i = 0; i < numDocs; i++) { - prepareIndex("test").setSource("f", "v").get(); - } - indicesAdmin().prepareRefresh("test").get(); - AtomicBoolean stopped = new AtomicBoolean(false); - Thread[] searchers = new Thread[randomIntBetween(1, 4)]; - CountDownLatch latch = new CountDownLatch(searchers.length); - for (int i = 0; i < searchers.length; i++) { - searchers[i] = new Thread(() -> { - latch.countDown(); - while (stopped.get() == false) { - try { - client().prepareSearch("test").setRequestCache(false).get().decRef(); - } catch (Exception ignored) { - return; - } - } - }); - searchers[i].start(); - } - latch.await(); - indicesAdmin().prepareDelete("test").get(); - stopped.set(true); - for (Thread searcher : searchers) { - searcher.join(); - } - } - - public void testLookUpSearchContext() throws Exception { - createIndex("index"); - SearchService searchService = getInstanceFromNode(SearchService.class); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - IndexShard indexShard = indexService.getShard(0); - List contextIds = new ArrayList<>(); - int numContexts = randomIntBetween(1, 10); - CountDownLatch latch = new CountDownLatch(1); - indexShard.getThreadPool().executor(ThreadPool.Names.SEARCH).execute(() -> { - try { - for (int i = 0; i < numContexts; i++) { - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - new SearchRequest().allowPartialSearchResults(true), - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - final ReaderContext context = searchService.createAndPutReaderContext( - request, - indexService, - indexShard, - indexShard.acquireSearcherSupplier(), - SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() - ); - assertThat(context.id().getId(), equalTo((long) (i + 1))); - contextIds.add(context.id()); - } - assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); - while (contextIds.isEmpty() == false) { - final ShardSearchContextId contextId = randomFrom(contextIds); - assertFalse(searchService.freeReaderContext(new ShardSearchContextId(UUIDs.randomBase64UUID(), contextId.getId()))); - assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); - if (randomBoolean()) { - assertTrue(searchService.freeReaderContext(contextId)); - } else { - assertTrue( - searchService.freeReaderContext((new 
ShardSearchContextId(contextId.getSessionId(), contextId.getId()))) - ); - } - contextIds.remove(contextId); - assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); - assertFalse(searchService.freeReaderContext(contextId)); - assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); - } - } finally { - latch.countDown(); - } - }); - latch.await(); - } - - public void testOpenReaderContext() { - createIndex("index"); - SearchService searchService = getInstanceFromNode(SearchService.class); - PlainActionFuture future = new PlainActionFuture<>(); - searchService.openReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); - future.actionGet(); - assertThat(searchService.getActiveContexts(), equalTo(1)); - assertTrue(searchService.freeReaderContext(future.actionGet())); - } - - public void testCancelQueryPhaseEarly() throws Exception { - createIndex("index"); - final MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - - CountDownLatch latch1 = new CountDownLatch(1); - SearchShardTask task = new SearchShardTask(1, "", "", "", TaskId.EMPTY_TASK_ID, emptyMap()); - service.executeQueryPhase(request, task, new ActionListener<>() { - @Override - public void onResponse(SearchPhaseResult searchPhaseResult) { - service.freeReaderContext(searchPhaseResult.getContextId()); - latch1.countDown(); - } - - @Override - public void onFailure(Exception e) { - try { - fail("Search should not be cancelled"); - } finally { - latch1.countDown(); - } - } - }); - latch1.await(); - - CountDownLatch latch2 = new CountDownLatch(1); - service.executeDfsPhase(request, task, new ActionListener<>() { - @Override - public void onResponse(SearchPhaseResult searchPhaseResult) { - service.freeReaderContext(searchPhaseResult.getContextId()); - latch2.countDown(); - } - - @Override - public void onFailure(Exception e) { - try { - fail("Search should not be cancelled"); - } finally { - latch2.countDown(); - } - } - }); - latch2.await(); - - AtomicBoolean searchContextCreated = new AtomicBoolean(false); - service.setOnCreateSearchContext(c -> searchContextCreated.set(true)); - CountDownLatch latch3 = new CountDownLatch(1); - TaskCancelHelper.cancel(task, "simulated"); - service.executeQueryPhase(request, task, new ActionListener<>() { - @Override - public void onResponse(SearchPhaseResult searchPhaseResult) { - try { - fail("Search not cancelled early"); - } finally { - service.freeReaderContext(searchPhaseResult.getContextId()); - searchPhaseResult.decRef(); - latch3.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - assertThat(e, is(instanceOf(TaskCancelledException.class))); - assertThat(e.getMessage(), is("task cancelled [simulated]")); - assertThat(((TaskCancelledException) e).status(), is(RestStatus.BAD_REQUEST)); - assertThat(searchContextCreated.get(), is(false)); - latch3.countDown(); - } - }); - latch3.await(); - - searchContextCreated.set(false); - CountDownLatch latch4 = new 
CountDownLatch(1); - service.executeDfsPhase(request, task, new ActionListener<>() { - @Override - public void onResponse(SearchPhaseResult searchPhaseResult) { - try { - fail("Search not cancelled early"); - } finally { - service.freeReaderContext(searchPhaseResult.getContextId()); - latch4.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - assertThat(e, is(instanceOf(TaskCancelledException.class))); - assertThat(e.getMessage(), is("task cancelled [simulated]")); - assertThat(((TaskCancelledException) e).status(), is(RestStatus.BAD_REQUEST)); - assertThat(searchContextCreated.get(), is(false)); - latch4.countDown(); - } - }); - latch4.await(); - } - - public void testCancelFetchPhaseEarly() throws Exception { - createIndex("index"); - final MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - - AtomicBoolean searchContextCreated = new AtomicBoolean(false); - service.setOnCreateSearchContext(c -> searchContextCreated.set(true)); - - // Test fetch phase is cancelled early - String scrollId; - var searchResponse = client().search(searchRequest.allowPartialSearchResults(false).scroll(TimeValue.timeValueMinutes(10))).get(); - try { - scrollId = searchResponse.getScrollId(); - } finally { - searchResponse.decRef(); - } - - client().searchScroll(new SearchScrollRequest(scrollId)).get().decRef(); - assertThat(searchContextCreated.get(), is(true)); - - ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); - clearScrollRequest.addScrollId(scrollId); - client().clearScroll(clearScrollRequest); - - searchResponse = client().search(searchRequest.allowPartialSearchResults(false).scroll(TimeValue.timeValueMinutes(10))).get(); - try { - scrollId = searchResponse.getScrollId(); - } finally { - searchResponse.decRef(); - } - searchContextCreated.set(false); - service.setOnCheckCancelled(t -> { - SearchShardTask task = new SearchShardTask(randomLong(), "transport", "action", "", TaskId.EMPTY_TASK_ID, emptyMap()); - TaskCancelHelper.cancel(task, "simulated"); - return task; - }); - CountDownLatch latch = new CountDownLatch(1); - client().searchScroll(new SearchScrollRequest(scrollId), new ActionListener<>() { - @Override - public void onResponse(SearchResponse searchResponse) { - try { - fail("Search not cancelled early"); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - Throwable cancelledExc = e.getCause().getCause(); - assertThat(cancelledExc, is(instanceOf(TaskCancelledException.class))); - assertThat(cancelledExc.getMessage(), is("task cancelled [simulated]")); - assertThat(((TaskCancelledException) cancelledExc).status(), is(RestStatus.BAD_REQUEST)); - latch.countDown(); - } - }); - latch.await(); - assertThat(searchContextCreated.get(), is(false)); - - clearScrollRequest.setScrollIds(singletonList(scrollId)); - client().clearScroll(clearScrollRequest); - } - - public void testWaitOnRefresh() throws ExecutionException, InterruptedException { - createIndex("index"); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - 
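[editor's note] The two cancellation tests above hinge on ordering: a task cancelled before the phase starts must fail the listener without ever creating a SearchContext (hence the searchContextCreated.get() == false assertions). A simplified sketch of that ordering; the task and listener types stand in for SearchShardTask and ActionListener, and IllegalStateException stands in for TaskCancelledException:

import java.util.function.Consumer;

class EarlyCancellationSketch {
    interface CancellableTask {
        boolean isCancelled();

        String reason();
    }

    static void executePhase(CancellableTask task, Runnable createContextAndRun, Consumer<Exception> onFailure) {
        if (task.isCancelled()) {
            // Fail fast: no SearchContext is created and no searcher is acquired.
            onFailure.accept(new IllegalStateException("task cancelled [" + task.reason() + "]"));
            return;
        }
        createContextAndRun.run();
    }
}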
searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30)); - searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 })); - - final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); - assertEquals(RestStatus.CREATED, response.status()); - - SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null, - null, - null - ); - PlainActionFuture future = new PlainActionFuture<>(); - service.executeQueryPhase(request, task, future.delegateFailure((l, r) -> { - assertEquals(1, r.queryResult().getTotalHits().value); - l.onResponse(null); - })); - future.get(); - } - - public void testWaitOnRefreshFailsWithRefreshesDisabled() { - createIndex("index", Settings.builder().put("index.refresh_interval", "-1").build()); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30)); - searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 })); - - final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); - assertEquals(RestStatus.CREATED, response.status()); - - SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); - PlainActionFuture future = new PlainActionFuture<>(); - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null, - null, - null - ); - service.executeQueryPhase(request, task, future); - IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, future::actionGet); - assertThat( - illegalArgumentException.getMessage(), - containsString("Cannot use wait_for_checkpoints with [index.refresh_interval=-1]") - ); - } - - public void testWaitOnRefreshFailsIfCheckpointNotIndexed() { - createIndex("index"); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - // Increased timeout to avoid cancelling the search task prior to its completion, - // as we expect to raise an Exception. Timeout itself is tested on the following `testWaitOnRefreshTimeout` test. 
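[editor's note] The wait-for-checkpoints tests in this stretch assert three failure modes: refreshes disabled, waiting on a checkpoint that was never issued, and a plain timeout. A sketch of the first two validations, with illustrative inputs; the error strings are the ones asserted in the tests:

class WaitForCheckpointValidation {
    static void validate(long waitForCheckpoint, long maxIssuedSeqNo, long refreshIntervalMillis) {
        if (refreshIntervalMillis < 0) {
            // waiting for a refresh that will never happen is a client error
            throw new IllegalArgumentException("Cannot use wait_for_checkpoints with [index.refresh_interval=-1]");
        }
        if (waitForCheckpoint > maxIssuedSeqNo) {
            throw new IllegalArgumentException(
                "Cannot wait for unissued seqNo checkpoint [wait_for_checkpoint="
                    + waitForCheckpoint
                    + ", max_issued_seqNo="
                    + maxIssuedSeqNo
                    + "]"
            );
        }
    }

    public static void main(String[] args) {
        validate(1, 0, 1000); // throws: only seqNo 0 has been issued so far
    }
}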
- searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(200, 300))); - searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 1 })); - - final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); - assertEquals(RestStatus.CREATED, response.status()); - - SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); - PlainActionFuture future = new PlainActionFuture<>(); - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null, - null, - null - ); - service.executeQueryPhase(request, task, future); - - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, future::actionGet); - assertThat( - ex.getMessage(), - containsString("Cannot wait for unissued seqNo checkpoint [wait_for_checkpoint=1, max_issued_seqNo=0]") - ); - } - - public void testWaitOnRefreshTimeout() { - createIndex("index", Settings.builder().put("index.refresh_interval", "60s").build()); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(10, 100))); - searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 })); - - final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); - assertEquals(RestStatus.CREATED, response.status()); - - SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); - PlainActionFuture future = new PlainActionFuture<>(); - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null, - null, - null - ); - service.executeQueryPhase(request, task, future); - - ElasticsearchTimeoutException ex = expectThrows(ElasticsearchTimeoutException.class, future::actionGet); - assertThat(ex.getMessage(), containsString("Wait for seq_no [0] refreshed timed out [")); - } - - public void testMinimalSearchSourceInShardRequests() { - createIndex("test"); - int numDocs = between(0, 10); - for (int i = 0; i < numDocs; i++) { - prepareIndex("test").setSource("id", Integer.toString(i)).get(); - } - indicesAdmin().prepareRefresh("test").get(); - - BytesReference pitId = client().execute( - TransportOpenPointInTimeAction.TYPE, - new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(10)) - ).actionGet().getPointInTimeId(); - final MockSearchService searchService = (MockSearchService) getInstanceFromNode(SearchService.class); - final List shardRequests = new CopyOnWriteArrayList<>(); - searchService.setOnCreateSearchContext(ctx -> shardRequests.add(ctx.request())); - try { - assertHitCount( - client().prepareSearch() - .setSource( - new SearchSourceBuilder().size(between(numDocs, numDocs * 2)).pointInTimeBuilder(new PointInTimeBuilder(pitId)) - ), - numDocs - ); - } finally { - client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); - } - assertThat(shardRequests, not(emptyList())); - for (ShardSearchRequest shardRequest : 
shardRequests) { - assertNotNull(shardRequest.source()); - assertNotNull(shardRequest.source().pointInTimeBuilder()); - assertThat(shardRequest.source().pointInTimeBuilder().getEncodedId(), equalTo(BytesArray.EMPTY)); - } - } - - public void testDfsQueryPhaseRewrite() { - createIndex("index"); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - searchRequest.source(SearchSourceBuilder.searchSource().query(new TestRewriteCounterQueryBuilder())); - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); - ReaderContext context = service.createAndPutReaderContext( - request, - indexService, - indexShard, - reader, - SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() - ); - PlainActionFuture plainActionFuture = new PlainActionFuture<>(); - service.executeQueryPhase( - new QuerySearchRequest(null, context.id(), request, new AggregatedDfs(Map.of(), Map.of(), 10)), - new SearchShardTask(42L, "", "", "", null, emptyMap()), - plainActionFuture - ); - - plainActionFuture.actionGet(); - assertThat(((TestRewriteCounterQueryBuilder) request.source().query()).asyncRewriteCount, equalTo(1)); - final ShardSearchContextId contextId = context.id(); - assertTrue(service.freeReaderContext(contextId)); - } - - public void testEnableSearchWorkerThreads() throws IOException { - IndexService indexService = createIndex("index", Settings.EMPTY); - IndexShard indexShard = indexService.getShard(0); - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - new SearchRequest().allowPartialSearchResults(randomBoolean()), - indexShard.shardId(), - 0, - indexService.numberOfShards(), - AliasFilter.EMPTY, - 1f, - System.currentTimeMillis(), - null - ); - try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) { - SearchService service = getInstanceFromNode(SearchService.class); - SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap()); - - try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { - assertNotNull(searchContext.searcher().getExecutor()); - } - - try { - ClusterUpdateSettingsResponse response = client().admin() - .cluster() - .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) - .setPersistentSettings(Settings.builder().put(SEARCH_WORKER_THREADS_ENABLED.getKey(), false).build()) - .get(); - assertTrue(response.isAcknowledged()); - try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { - assertNull(searchContext.searcher().getExecutor()); - } - } finally { - // reset original default setting - client().admin() - .cluster() - .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) - .setPersistentSettings(Settings.builder().putNull(SEARCH_WORKER_THREADS_ENABLED.getKey()).build()) - .get(); - try (SearchContext 
searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { - assertNotNull(searchContext.searcher().getExecutor()); - } - } - } - } - - /** - * Verify that a single slice is created for requests that don't support parallel collection, while an executor is still - * provided to the searcher to parallelize other operations. Also ensure multiple slices are created for requests that do support - * parallel collection. - */ - public void testSlicingBehaviourForParallelCollection() throws Exception { - IndexService indexService = createIndex("index", Settings.EMPTY); - ThreadPoolExecutor executor = (ThreadPoolExecutor) indexService.getThreadPool().executor(ThreadPool.Names.SEARCH); - final int configuredMaxPoolSize = 10; - executor.setMaximumPoolSize(configuredMaxPoolSize); // We set this explicitly to be independent of CPU cores. - int numDocs = randomIntBetween(50, 100); - for (int i = 0; i < numDocs; i++) { - prepareIndex("index").setId(String.valueOf(i)).setSource("field", "value").get(); - if (i % 5 == 0) { - indicesAdmin().prepareRefresh("index").get(); - } - } - final IndexShard indexShard = indexService.getShard(0); - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - new SearchRequest().allowPartialSearchResults(randomBoolean()), - indexShard.shardId(), - 0, - indexService.numberOfShards(), - AliasFilter.EMPTY, - 1f, - System.currentTimeMillis(), - null - ); - SearchService service = getInstanceFromNode(SearchService.class); - NonCountingTermQuery termQuery = new NonCountingTermQuery(new Term("field", "value")); - assertEquals(0, executor.getCompletedTaskCount()); - try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) { - SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap()); - { - try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, true)) { - ContextIndexSearcher searcher = searchContext.searcher(); - assertNotNull(searcher.getExecutor()); - - final int maxPoolSize = executor.getMaximumPoolSize(); - assertEquals( - "Sanity check to ensure this isn't the default of 1 when pool size is unset", - configuredMaxPoolSize, - maxPoolSize - ); - - final int expectedSlices = ContextIndexSearcher.computeSlices( - searcher.getIndexReader().leaves(), - maxPoolSize, - 1 - ).length; - assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); - - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); - searcher.search(termQuery, new TotalHitCountCollectorManager()); - assertBusy( - () -> assertEquals( - "DFS supports parallel collection, so the number of slices should be > 1.", - expectedSlices - 1, // one slice executes on the calling thread - executor.getCompletedTaskCount() - priorExecutorTaskCount - ) - ); - } - } - { - try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) { - ContextIndexSearcher searcher = searchContext.searcher(); - assertNotNull(searcher.getExecutor()); - - final int maxPoolSize = executor.getMaximumPoolSize(); - assertEquals( - "Sanity check to ensure this isn't the default of 1 when pool size is unset", - configuredMaxPoolSize, - maxPoolSize - ); - - final int expectedSlices = ContextIndexSearcher.computeSlices( - searcher.getIndexReader().leaves(), - maxPoolSize, - 1 - ).length; - assertNotEquals("Sanity check to ensure this isn't the default of 1 when 
pool size is unset", 1, expectedSlices); - - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); - searcher.search(termQuery, new TotalHitCountCollectorManager()); - assertBusy( - () -> assertEquals( - "QUERY supports parallel collection when enabled, so the number of slices should be > 1.", - expectedSlices - 1, // one slice executes on the calling thread - executor.getCompletedTaskCount() - priorExecutorTaskCount - ) - ); - } - } - { - try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.FETCH, true)) { - ContextIndexSearcher searcher = searchContext.searcher(); - assertNull(searcher.getExecutor()); - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); - searcher.search(termQuery, new TotalHitCountCollectorManager()); - assertBusy( - () -> assertEquals( - "The number of slices should be 1 as FETCH does not support parallel collection and thus runs on the calling" - + " thread.", - 0, - executor.getCompletedTaskCount() - priorExecutorTaskCount - ) - ); - } - } - { - try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.NONE, true)) { - ContextIndexSearcher searcher = searchContext.searcher(); - assertNull(searcher.getExecutor()); - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); - searcher.search(termQuery, new TotalHitCountCollectorManager()); - assertBusy( - () -> assertEquals( - "The number of slices should be 1 as NONE does not support parallel collection.", - 0, // zero since one slice executes on the calling thread - executor.getCompletedTaskCount() - priorExecutorTaskCount - ) - ); - } - } - - try { - ClusterUpdateSettingsResponse response = client().admin() - .cluster() - .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) - .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), false).build()) - .get(); - assertTrue(response.isAcknowledged()); - { - try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) { - ContextIndexSearcher searcher = searchContext.searcher(); - assertNull(searcher.getExecutor()); - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); - searcher.search(termQuery, new TotalHitCountCollectorManager()); - assertBusy( - () -> assertEquals( - "The number of slices should be 1 when QUERY parallel collection is disabled.", - 0, // zero since one slice executes on the calling thread - executor.getCompletedTaskCount() - priorExecutorTaskCount - ) - ); - } - } - } finally { - // Reset to the original default setting and check to ensure it takes effect. 
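[editor's note] The slicing test's arithmetic is worth spelling out: with S slices, one slice always runs on the calling thread, so the executor completes exactly S - 1 tasks. The computeSliceCount below is a naive stand-in for ContextIndexSearcher.computeSlices (the real algorithm groups leaves by document count); only the task accounting is the point here:

class SliceAccounting {
    // Naive stand-in: at most one slice per leaf, capped by the pool size.
    static int computeSliceCount(int leafCount, int maxPoolSize) {
        return Math.max(1, Math.min(leafCount, maxPoolSize));
    }

    static long expectedExecutorTasks(int sliceCount) {
        return sliceCount - 1; // one slice always executes on the calling thread
    }

    public static void main(String[] args) {
        int slices = computeSliceCount(20, 10);
        // 10 slices -> 9 tasks observed via executor.getCompletedTaskCount()
        System.out.println(expectedExecutorTasks(slices));
    }
}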
- client().admin() - .cluster() - .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) - .setPersistentSettings(Settings.builder().putNull(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey()).build()) - .get(); - { - try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) { - ContextIndexSearcher searcher = searchContext.searcher(); - assertNotNull(searcher.getExecutor()); - - final int maxPoolSize = executor.getMaximumPoolSize(); - assertEquals( - "Sanity check to ensure this isn't the default of 1 when pool size is unset", - configuredMaxPoolSize, - maxPoolSize - ); - - final int expectedSlices = ContextIndexSearcher.computeSlices( - searcher.getIndexReader().leaves(), - maxPoolSize, - 1 - ).length; - assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); - - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); - searcher.search(termQuery, new TotalHitCountCollectorManager()); - assertBusy( - () -> assertEquals( - "QUERY supports parallel collection when enabled, so the number of slices should be > 1.", - expectedSlices - 1, // one slice executes on the calling thread - executor.getCompletedTaskCount() - priorExecutorTaskCount - ) - ); - } - } - } - } - } - - /** - * This method tests validation that happens on the data nodes, which is now performed on the coordinating node. - * We still need the validation to cover for mixed cluster scenarios where the coordinating node does not perform the check yet. - */ - public void testParseSourceValidation() { - String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); - IndexService indexService = createIndex(index); - final SearchService service = getInstanceFromNode(SearchService.class); - { - // scroll and search_after - SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); - searchRequest.scroll(new TimeValue(1000)); - searchRequest.source().searchAfter(new String[] { "value" }); - assertCreateContextValidation(searchRequest, "`search_after` cannot be used in a scroll context.", indexService, service); - } - { - // scroll and collapse - SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); - searchRequest.scroll(new TimeValue(1000)); - searchRequest.source().collapse(new CollapseBuilder("field")); - assertCreateContextValidation(searchRequest, "cannot use `collapse` in a scroll context", indexService, service); - } - { - // search_after and `from` isn't valid - SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); - searchRequest.source().searchAfter(new String[] { "value" }); - searchRequest.source().from(10); - assertCreateContextValidation( - searchRequest, - "`from` parameter must be set to 0 when `search_after` is used", - indexService, - service - ); - } - { - // slice without scroll or pit - SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); - searchRequest.source().slice(new SliceBuilder(1, 10)); - assertCreateContextValidation( - searchRequest, - "[slice] can only be used with [scroll] or [point-in-time] requests", - indexService, - service - ); - } - { - // stored fields disabled with _source requested - SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); - searchRequest.source().storedField("_none_"); - searchRequest.source().fetchSource(true); - assertCreateContextValidation( - searchRequest, - "[stored_fields] 
cannot be disabled if [_source] is requested", - indexService, - service - ); - } - { - // stored fields disabled with fetch fields requested - SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); - searchRequest.source().storedField("_none_"); - searchRequest.source().fetchSource(false); - searchRequest.source().fetchField("field"); - assertCreateContextValidation( - searchRequest, - "[stored_fields] cannot be disabled when using the [fields] option", - indexService, - service - ); - } - } - - private static void assertCreateContextValidation( - SearchRequest searchRequest, - String errorMessage, - IndexService indexService, - SearchService searchService - ) { - ShardId shardId = new ShardId(indexService.index(), 0); - long nowInMillis = System.currentTimeMillis(); - String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10); - searchRequest.allowPartialSearchResults(randomBoolean()); - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - shardId, - 0, - indexService.numberOfShards(), - AliasFilter.EMPTY, - 1f, - nowInMillis, - clusterAlias - ); - - SearchShardTask task = new SearchShardTask(1, "type", "action", "description", null, emptyMap()); - - ReaderContext readerContext = null; - try { - ReaderContext createOrGetReaderContext = searchService.createOrGetReaderContext(request); - readerContext = createOrGetReaderContext; - IllegalArgumentException exception = expectThrows( - IllegalArgumentException.class, - () -> searchService.createContext(createOrGetReaderContext, request, task, ResultsType.QUERY, randomBoolean()) - ); - assertThat(exception.getMessage(), containsString(errorMessage)); - } finally { - if (readerContext != null) { - readerContext.close(); - searchService.freeReaderContext(readerContext.id()); - } - } - } - - private static ReaderContext createReaderContext(IndexService indexService, IndexShard indexShard) { - return new ReaderContext( - new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()), - indexService, - indexShard, - indexShard.acquireSearcherSupplier(), - randomNonNegativeLong(), - false - ); - } - - private static class TestRewriteCounterQueryBuilder extends AbstractQueryBuilder { - - final int asyncRewriteCount; - final Supplier fetched; - - TestRewriteCounterQueryBuilder() { - asyncRewriteCount = 0; - fetched = null; - } - - private TestRewriteCounterQueryBuilder(int asyncRewriteCount, Supplier fetched) { - this.asyncRewriteCount = asyncRewriteCount; - this.fetched = fetched; - } - - @Override - public String getWriteableName() { - return "test_query"; - } - - @Override - public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ZERO; - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException {} - - @Override - protected void doXContent(XContentBuilder builder, Params params) throws IOException {} - - @Override - protected Query doToQuery(SearchExecutionContext context) throws IOException { - return new MatchAllDocsQuery(); - } - - @Override - protected boolean doEquals(TestRewriteCounterQueryBuilder other) { - return true; - } - - @Override - protected int doHashCode() { - return 42; - } - - @Override - protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { - if (asyncRewriteCount > 0) { - return this; - } - if (fetched != null) { - if (fetched.get() == null) { - return this; - } - assert fetched.get(); - return new TestRewriteCounterQueryBuilder(1, 
null); - } - if (queryRewriteContext.convertToDataRewriteContext() != null) { - SetOnce<Boolean> awaitingFetch = new SetOnce<>(); - queryRewriteContext.registerAsyncAction((c, l) -> { - awaitingFetch.set(true); - l.onResponse(null); - }); - return new TestRewriteCounterQueryBuilder(0, awaitingFetch::get); - } - return this; - } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnWhitelistedFunctionTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnWhitelistedFunctionTests.java index 69173957aebab..3bc458880db0a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnWhitelistedFunctionTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnWhitelistedFunctionTests.java @@ -326,7 +326,7 @@ public void testLinearMovAvg() { } double avg = 0; - long totalWeight = 1; + long totalWeight = 0; long current = 1; for (double value : window) { diff --git a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java index c63c65a750d7c..cc268a6021cb3 100644 --- a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java +++ b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java @@ -8,7 +8,6 @@ */ package fixture.aws.imds; -import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; import org.junit.rules.ExternalResource; @@ -17,29 +16,14 @@ import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.Objects; -import java.util.Set; -import java.util.function.BiConsumer; public class Ec2ImdsHttpFixture extends ExternalResource { + private final Ec2ImdsServiceBuilder ec2ImdsServiceBuilder; private HttpServer server; - private final Ec2ImdsVersion ec2ImdsVersion; - private final BiConsumer<String, String> newCredentialsConsumer; - private final Set<String> alternativeCredentialsEndpoints; - - public Ec2ImdsHttpFixture( - Ec2ImdsVersion ec2ImdsVersion, - BiConsumer<String, String> newCredentialsConsumer, - Set<String> alternativeCredentialsEndpoints - ) { - this.ec2ImdsVersion = Objects.requireNonNull(ec2ImdsVersion); - this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); - this.alternativeCredentialsEndpoints = Objects.requireNonNull(alternativeCredentialsEndpoints); - } - - protected HttpHandler createHandler() { - return new Ec2ImdsHttpHandler(ec2ImdsVersion, newCredentialsConsumer, alternativeCredentialsEndpoints); + public Ec2ImdsHttpFixture(Ec2ImdsServiceBuilder ec2ImdsServiceBuilder) { + this.ec2ImdsServiceBuilder = ec2ImdsServiceBuilder; } public String getAddress() { @@ -52,7 +36,7 @@ public void stop(int delay) { protected void before() throws Throwable { server = HttpServer.create(resolveAddress(), 0); - server.createContext("/", Objects.requireNonNull(createHandler())); + server.createContext("/", Objects.requireNonNull(ec2ImdsServiceBuilder.buildHandler())); server.start(); } diff --git a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java index 281465b96de05..fd2044357257b 100644 --- a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java +++ b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java @@ -26,6 +26,7 @@ import java.util.Objects; import java.util.Set; import
java.util.function.BiConsumer; +import java.util.function.Supplier; import static org.elasticsearch.test.ESTestCase.randomIdentifier; import static org.elasticsearch.test.ESTestCase.randomSecretKey; @@ -43,15 +44,18 @@ public class Ec2ImdsHttpHandler implements HttpHandler { private final BiConsumer<String, String> newCredentialsConsumer; private final Set<String> validCredentialsEndpoints = ConcurrentCollections.newConcurrentSet(); + private final Supplier<String> availabilityZoneSupplier; public Ec2ImdsHttpHandler( Ec2ImdsVersion ec2ImdsVersion, BiConsumer<String, String> newCredentialsConsumer, - Collection<String> alternativeCredentialsEndpoints + Collection<String> alternativeCredentialsEndpoints, + Supplier<String> availabilityZoneSupplier ) { this.ec2ImdsVersion = Objects.requireNonNull(ec2ImdsVersion); this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); this.validCredentialsEndpoints.addAll(alternativeCredentialsEndpoints); + this.availabilityZoneSupplier = availabilityZoneSupplier; } @Override @@ -98,6 +102,13 @@ public void handle(final HttpExchange exchange) throws IOException { exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); exchange.getResponseBody().write(response); return; + } else if (path.equals("/latest/meta-data/placement/availability-zone")) { + final var availabilityZone = availabilityZoneSupplier.get(); + final byte[] response = availabilityZone.getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "text/plain"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); + exchange.getResponseBody().write(response); + return; } else if (validCredentialsEndpoints.contains(path)) { final String accessKey = randomIdentifier(); final String sessionToken = randomIdentifier(); diff --git a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsServiceBuilder.java b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsServiceBuilder.java new file mode 100644 index 0000000000000..bca43da8683b6 --- /dev/null +++ b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsServiceBuilder.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1".
+ */ + +package fixture.aws.imds; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Collection; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Supplier; + +public class Ec2ImdsServiceBuilder { + + private final Ec2ImdsVersion ec2ImdsVersion; + private BiConsumer<String, String> newCredentialsConsumer = Ec2ImdsServiceBuilder::rejectNewCredentials; + private Collection<String> alternativeCredentialsEndpoints = Set.of(); + private Supplier<String> availabilityZoneSupplier = Ec2ImdsServiceBuilder::rejectAvailabilityZone; + + public Ec2ImdsServiceBuilder(Ec2ImdsVersion ec2ImdsVersion) { + this.ec2ImdsVersion = ec2ImdsVersion; + } + + public Ec2ImdsServiceBuilder newCredentialsConsumer(BiConsumer<String, String> newCredentialsConsumer) { + this.newCredentialsConsumer = newCredentialsConsumer; + return this; + } + + private static void rejectNewCredentials(String ignored1, String ignored2) { + ESTestCase.fail("credentials creation not supported"); + } + + public Ec2ImdsServiceBuilder alternativeCredentialsEndpoints(Collection<String> alternativeCredentialsEndpoints) { + this.alternativeCredentialsEndpoints = alternativeCredentialsEndpoints; + return this; + } + + private static String rejectAvailabilityZone() { + return ESTestCase.fail(null, "availability zones not supported"); + } + + public Ec2ImdsServiceBuilder availabilityZoneSupplier(Supplier<String> availabilityZoneSupplier) { + this.availabilityZoneSupplier = availabilityZoneSupplier; + return this; + } + + public Ec2ImdsHttpHandler buildHandler() { + return new Ec2ImdsHttpHandler(ec2ImdsVersion, newCredentialsConsumer, alternativeCredentialsEndpoints, availabilityZoneSupplier); + } + +} diff --git a/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java b/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java index bb613395a0fba..6d3eb3d14e9b2 100644 --- a/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java +++ b/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java @@ -30,6 +30,7 @@ import java.net.InetSocketAddress; import java.net.URI; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -43,7 +44,7 @@ public class Ec2ImdsHttpHandlerTests extends ESTestCase { public void testImdsV1() throws IOException { final Map<String, String> generatedCredentials = new HashMap<>(); - final var handler = new Ec2ImdsHttpHandler(Ec2ImdsVersion.V1, generatedCredentials::put, Set.of()); + final var handler = new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(generatedCredentials::put).buildHandler(); final var roleResponse = handleRequest(handler, "GET", SECURITY_CREDENTIALS_URI); assertEquals(RestStatus.OK, roleResponse.status()); @@ -66,18 +67,14 @@ public void testImdsV1() throws IOException { public void testImdsV2Disabled() { assertEquals( RestStatus.METHOD_NOT_ALLOWED, - handleRequest( - new Ec2ImdsHttpHandler(Ec2ImdsVersion.V1, (accessKey, sessionToken) -> fail(), Set.of()), - "PUT", - "/latest/api/token" - ).status() + handleRequest(new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).buildHandler(), "PUT", "/latest/api/token").status() ); } public void testImdsV2() throws IOException { final Map<String, String> generatedCredentials = new HashMap<>(); - final var handler = new Ec2ImdsHttpHandler(Ec2ImdsVersion.V2, generatedCredentials::put, Set.of()); + final var handler = new
Ec2ImdsServiceBuilder(Ec2ImdsVersion.V2).newCredentialsConsumer(generatedCredentials::put).buildHandler(); final var tokenResponse = handleRequest(handler, "PUT", "/latest/api/token"); assertEquals(RestStatus.OK, tokenResponse.status()); @@ -101,6 +98,21 @@ public void testImdsV2() throws IOException { assertEquals(sessionToken, responseMap.get("Token")); } + public void testAvailabilityZone() { + final Set generatedAvailabilityZones = new HashSet<>(); + final var handler = new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).availabilityZoneSupplier(() -> { + final var newAvailabilityZone = randomIdentifier(); + generatedAvailabilityZones.add(newAvailabilityZone); + return newAvailabilityZone; + }).buildHandler(); + + final var availabilityZoneResponse = handleRequest(handler, "GET", "/latest/meta-data/placement/availability-zone"); + assertEquals(RestStatus.OK, availabilityZoneResponse.status()); + final var availabilityZone = availabilityZoneResponse.body().utf8ToString(); + + assertEquals(generatedAvailabilityZones, Set.of(availabilityZone)); + } + private record TestHttpResponse(RestStatus status, BytesReference body) {} private static TestHttpResponse checkImdsV2GetRequest(Ec2ImdsHttpHandler handler, String uri, String token) { diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java index 91875600ec000..0a4c99eb8b52a 100644 --- a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java +++ b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java @@ -45,7 +45,7 @@ public MockPluginsService(Settings settings, Environment environment, Collection super( settings, environment.configFile(), - new PluginsLoader(Collections.emptyList(), Collections.emptyList(), Collections.emptyMap(), Collections.emptySet()) + new PluginsLoader(Collections.emptySet(), Collections.emptySet(), Collections.emptyMap()) ); List pluginsLoaded = new ArrayList<>(); diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/internal/rewriter/MockQueryRewriteInterceptor.java b/test/framework/src/main/java/org/elasticsearch/plugins/internal/rewriter/MockQueryRewriteInterceptor.java new file mode 100644 index 0000000000000..196e5bd4f4a2d --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/plugins/internal/rewriter/MockQueryRewriteInterceptor.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
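For orientation, a minimal usage sketch (not part of this patch) of how a test can wire the rebuilt IMDS fixture together; it uses only the builder API introduced above, and the credentials map and zone literal are illustrative:

    // Record any credentials the fixture hands out, and serve a fixed availability zone.
    Map<String, String> seenCredentials = new HashMap<>();
    Ec2ImdsHttpFixture imdsFixture = new Ec2ImdsHttpFixture(
        new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V2)
            .newCredentialsConsumer(seenCredentials::put)
            .availabilityZoneSupplier(() -> "us-east-1a") // illustrative zone name
    );
    // The fixture's before() then builds the handler via ec2ImdsServiceBuilder.buildHandler() and starts the server.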
+ */ + +package org.elasticsearch.plugins.internal.rewriter; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; + +public class MockQueryRewriteInterceptor implements QueryRewriteInterceptor { + + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + return queryBuilder; + } + + @Override + public String getQueryName() { + return this.getClass().getSimpleName(); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index bdf323afb8d96..20cb66affddee 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -71,6 +71,8 @@ import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.plugins.internal.rewriter.MockQueryRewriteInterceptor; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.MockScriptService; @@ -629,7 +631,8 @@ QueryRewriteContext createQueryRewriteContext() { () -> true, scriptService, createMockResolvedIndices(), - null + null, + createMockQueryRewriteInterceptor() ); } @@ -670,5 +673,9 @@ private ResolvedIndices createMockResolvedIndices() { Map.of(index, indexMetadata) ); } + + private QueryRewriteInterceptor createMockQueryRewriteInterceptor() { + return new MockQueryRewriteInterceptor(); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 615393bdbf442..4ba108d944d3d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -73,7 +73,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.elasticsearch.action.search.SearchTransportService.FREE_CONTEXT_ACTION_NAME; +import static org.elasticsearch.action.search.SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; import static org.elasticsearch.test.NodeRoles.dataNode; @@ -477,7 +477,7 @@ protected void ensureNoInitializingShards() { */ protected void ensureAllFreeContextActionsAreConsumed() throws Exception { logger.info("--> waiting for all free_context tasks to complete within a reasonable time"); - safeGet(clusterAdmin().prepareListTasks().setActions(FREE_CONTEXT_ACTION_NAME + "*").setWaitForCompletion(true).execute()); + safeGet(clusterAdmin().prepareListTasks().setActions(FREE_CONTEXT_SCROLL_ACTION_NAME + "*").setWaitForCompletion(true).execute()); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 83c257f12a183..045836d9efee1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1200,10 +1200,30 @@ public static SecureString randomSecureStringOfLength(int codeUnits) { return new SecureString(randomAlpha.toCharArray()); } - public static String randomNullOrAlphaOfLength(int codeUnits) { + public static String randomAlphaOfLengthOrNull(int codeUnits) { return randomBoolean() ? null : randomAlphaOfLength(codeUnits); } + public static Long randomLongOrNull() { + return randomBoolean() ? null : randomLong(); + } + + public static Long randomPositiveLongOrNull() { + return randomBoolean() ? null : randomNonNegativeLong(); + } + + public static Integer randomIntOrNull() { + return randomBoolean() ? null : randomInt(); + } + + public static Integer randomPositiveIntOrNull() { + return randomBoolean() ? null : randomNonNegativeInt(); + } + + public static Float randomFloatOrNull() { + return randomBoolean() ? null : randomFloat(); + } + /** * Creates a valid random identifier such as node id or index name */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java index d86c15aa14bc9..558303f7e0f0f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java @@ -104,7 +104,7 @@ public boolean isNeedsActive() { return needsActive; } - /** Create a momentary feature for hte given license level */ + /** Create a momentary feature for the given license level */ public static Momentary momentary(String family, String name, License.OperationMode licenseLevel) { return new Momentary(family, name, licenseLevel, true); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 4f8a18e28aea1..3c7b089b4cd63 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -106,6 +106,7 @@ public class XPackLicenseState { messages.put(XPackField.CCR, XPackLicenseState::ccrAcknowledgementMessages); messages.put(XPackField.ENTERPRISE_SEARCH, XPackLicenseState::enterpriseSearchAcknowledgementMessages); messages.put(XPackField.REDACT_PROCESSOR, XPackLicenseState::redactProcessorAcknowledgementMessages); + messages.put(XPackField.ESQL, XPackLicenseState::esqlAcknowledgementMessages); ACKNOWLEDGMENT_MESSAGES = Collections.unmodifiableMap(messages); } @@ -243,6 +244,26 @@ private static String[] enterpriseSearchAcknowledgementMessages(OperationMode cu return Strings.EMPTY_ARRAY; } + private static String[] esqlAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { + /* + * Provide an acknowledgement warning to customers who downgrade from Trial or Enterprise to a lower + * license level (Basic, Standard, Gold or Platinum) that they will no longer be able to do CCS in ES|QL. + */ + switch (newMode) { + case BASIC: + case STANDARD: + case GOLD: + case PLATINUM: + switch (currentMode) { + case TRIAL: + case ENTERPRISE: + return new String[] { "ES|QL cross-cluster search will be disabled."
}; + } + break; + } + return Strings.EMPTY_ARRAY; + } + private static String[] machineLearningAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { switch (newMode) { case BASIC: diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java index 311f3484900f2..bc9c3474ee63a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java @@ -20,8 +20,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -73,17 +71,17 @@ public AllocateAction( Map require ) { if (include == null) { - this.include = Collections.emptyMap(); + this.include = Map.of(); } else { this.include = include; } if (exclude == null) { - this.exclude = Collections.emptyMap(); + this.exclude = Map.of(); } else { this.exclude = exclude; } if (require == null) { - this.require = Collections.emptyMap(); + this.require = Map.of(); } else { this.require = require; } @@ -201,7 +199,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { } UpdateSettingsStep allocateStep = new UpdateSettingsStep(allocateKey, allocationRoutedKey, client, newSettings.build()); AllocationRoutedStep routedCheckStep = new AllocationRoutedStep(allocationRoutedKey, nextStepKey); - return Arrays.asList(allocateStep, routedCheckStep); + return List.of(allocateStep, routedCheckStep); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java index 7cdef6207c487..bc3fc0ccae02c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStep.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import java.util.Collections; +import java.util.List; import static org.elasticsearch.xpack.core.ilm.step.info.AllocationInfo.allShardsActiveAllocationInfo; import static org.elasticsearch.xpack.core.ilm.step.info.AllocationInfo.waitingForActiveShardsAllocationInfo; @@ -62,7 +62,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { } AllocationDeciders allocationDeciders = new AllocationDeciders( - Collections.singletonList( + List.of( new FilterAllocationDecider( clusterState.getMetadata().settings(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncWaitStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncWaitStep.java index 6a72af5bce5e9..fc5e8d473b763 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncWaitStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncWaitStep.java @@ -8,6 +8,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import 
org.elasticsearch.xcontent.ToXContentObject; @@ -20,6 +21,7 @@ */ public abstract class AsyncWaitStep extends Step { + @Nullable private final Client client; public AsyncWaitStep(StepKey key, StepKey nextStepKey, Client client) { @@ -27,6 +29,7 @@ public AsyncWaitStep(StepKey key, StepKey nextStepKey, Client client) { this.client = client; } + @Nullable protected Client getClient() { return client; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitStep.java index d1dbfede63c60..4ed83fa170ead 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitStep.java @@ -33,22 +33,6 @@ public boolean isCompletable() { return true; } - public static class Result { - private final boolean complete; - private final ToXContentObject informationContext; - - public Result(boolean complete, ToXContentObject informationContext) { - this.complete = complete; - this.informationContext = informationContext; - } - - public boolean isComplete() { - return complete; - } - - public ToXContentObject getInformationContext() { - return informationContext; - } - } + public record Result(boolean complete, ToXContentObject informationContext) {} } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStep.java index 5e30baa6b9669..c7fa1ea611a0f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStep.java @@ -62,7 +62,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { Result stepResult = stepToExecute.isConditionMet(index, clusterState); - if (stepResult.isComplete() == false) { + if (stepResult.complete() == false) { // checking the threshold after we execute the step to make sure we execute the wrapped step at least once (because time is a // wonderful thing) TimeValue retryThreshold = LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD_SETTING.get(idxMeta.getSettings()); @@ -77,7 +77,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { getKey().name(), getKey().action(), idxMeta.getIndex().getName(), - Strings.toString(stepResult.getInformationContext()), + Strings.toString(stepResult.informationContext()), nextKeyOnThresholdBreach ); logger.debug(message); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java index d212492f14d01..6c2ab86995a6d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java @@ -17,7 +17,6 @@ import java.io.IOException; import java.time.Instant; -import java.util.Arrays; import java.util.List; import java.util.Objects; @@ -94,12 +93,11 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) WaitUntilTimeSeriesEndTimePassesStep waitUntilTimeSeriesEndTimeStep = new WaitUntilTimeSeriesEndTimePassesStep( waitTimeSeriesEndTimePassesKey, cleanSnapshotKey, - Instant::now, - 
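To make the recurring constructor change below concrete: WaitUntilTimeSeriesEndTimePassesStep performs a pure time comparison, so the ILM actions now construct it without a Client, and the step passes null up to AsyncWaitStep (whose getClient() is newly @Nullable). A minimal sketch, with illustrative step keys:

    // waitKey and nextKey are illustrative; only the clock supplier is still required.
    AsyncWaitStep step = new WaitUntilTimeSeriesEndTimePassesStep(waitKey, nextKey, Instant::now);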
client + Instant::now ); CleanupSnapshotStep cleanupSnapshotStep = new CleanupSnapshotStep(cleanSnapshotKey, deleteStepKey, client); DeleteStep deleteStep = new DeleteStep(deleteStepKey, nextStepKey, client); - return Arrays.asList(waitForNoFollowersStep, waitUntilTimeSeriesEndTimeStep, cleanupSnapshotStep, deleteStep); + return List.of(waitForNoFollowersStep, waitUntilTimeSeriesEndTimeStep, cleanupSnapshotStep, deleteStep); } else { WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep( waitForNoFollowerStepKey, @@ -109,11 +107,10 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) WaitUntilTimeSeriesEndTimePassesStep waitUntilTimeSeriesEndTimeStep = new WaitUntilTimeSeriesEndTimePassesStep( waitTimeSeriesEndTimePassesKey, deleteStepKey, - Instant::now, - client + Instant::now ); DeleteStep deleteStep = new DeleteStep(deleteStepKey, nextStepKey, client); - return Arrays.asList(waitForNoFollowersStep, waitUntilTimeSeriesEndTimeStep, deleteStep); + return List.of(waitForNoFollowersStep, waitUntilTimeSeriesEndTimeStep, deleteStep); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java index 697f948e47832..6ce9e05e4a464 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java @@ -200,8 +200,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { WaitUntilTimeSeriesEndTimePassesStep waitUntilTimeSeriesEndTimeStep = new WaitUntilTimeSeriesEndTimePassesStep( waitTimeSeriesEndTimePassesKey, readOnlyKey, - Instant::now, - client + Instant::now ); // Mark source index as read-only ReadOnlyStep readOnlyStep = new ReadOnlyStep(readOnlyKey, generateDownsampleIndexNameKey, client); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeAction.java index f8f4ce2bb0354..ac398bccb64e4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeAction.java @@ -162,8 +162,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) WaitUntilTimeSeriesEndTimePassesStep waitUntilTimeSeriesEndTimeStep = new WaitUntilTimeSeriesEndTimePassesStep( waitTimeSeriesEndTimePassesKey, codecChange ? closeKey : forceMergeKey, - Instant::now, - client + Instant::now ); // Indices already in this step key when upgrading need to know how to move forward but stop making the index diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java index f3afe9e4d52cc..741fff63f61f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.Objects; -import java.util.stream.Collectors; /** * Invokes a force merge on a single index. @@ -67,10 +66,7 @@ public void performAction( policyName, failures == null ? 
"n/a" - : Strings.collectionToDelimitedString( - Arrays.stream(failures).map(Strings::toString).collect(Collectors.toList()), - "," - ), + : Strings.collectionToDelimitedString(Arrays.stream(failures).map(Strings::toString).toList(), ","), NAME ); logger.warn(errorMessage); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/FreezeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/FreezeAction.java index 67763e781e5a5..09e625b96135c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/FreezeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/FreezeAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Arrays; import java.util.List; /** @@ -98,7 +97,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { ); CheckNotDataStreamWriteIndexStep checkNoWriteIndexStep = new CheckNotDataStreamWriteIndexStep(checkNotWriteIndex, freezeStepKey); FreezeStep freezeStep = new FreezeStep(freezeStepKey, nextStepKey, client); - return Arrays.asList(conditionalSkipFreezeStep, checkNoWriteIndexStep, freezeStep); + return List.of(conditionalSkipFreezeStep, checkNoWriteIndexStep, freezeStep); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java index 1a64e589d20b5..6a272b0d2271e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java @@ -26,7 +26,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; /** * A utility class used for index lifecycle policies @@ -121,7 +120,7 @@ public static ItemUsage calculateUsage( .stream() .filter(indexMetadata -> policyName.equals(indexMetadata.getLifecyclePolicyName())) .map(indexMetadata -> indexMetadata.getIndex().getName()) - .collect(Collectors.toList()); + .toList(); final List allDataStreams = indexNameExpressionResolver.dataStreamNames( state, @@ -136,12 +135,12 @@ public static ItemUsage calculateUsage( } else { return false; } - }).collect(Collectors.toList()); + }).toList(); final List composableTemplates = state.metadata().templatesV2().keySet().stream().filter(templateName -> { Settings settings = MetadataIndexTemplateService.resolveSettings(state.metadata(), templateName); return policyName.equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(settings)); - }).collect(Collectors.toList()); + }).toList(); return new ItemUsage(indices, dataStreams, composableTemplates); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java index 7d045f2950e1b..82d41b91fea4f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java @@ -41,6 +41,7 @@ public class MountSnapshotStep extends AsyncRetryDuringSnapshotActionStep { private final MountSearchableSnapshotRequest.Storage storageType; @Nullable private final Integer totalShardsPerNode; + private final int replicas; public MountSnapshotStep( StepKey key, @@ -48,7 +49,8 @@ public 
MountSnapshotStep( Client client, String restoredIndexPrefix, MountSearchableSnapshotRequest.Storage storageType, - @Nullable Integer totalShardsPerNode + @Nullable Integer totalShardsPerNode, + int replicas ) { super(key, nextStepKey, client); this.restoredIndexPrefix = restoredIndexPrefix; @@ -57,16 +59,10 @@ public MountSnapshotStep( throw new IllegalArgumentException("[" + SearchableSnapshotAction.TOTAL_SHARDS_PER_NODE.getPreferredName() + "] must be >= 1"); } this.totalShardsPerNode = totalShardsPerNode; - } - public MountSnapshotStep( - StepKey key, - StepKey nextStepKey, - Client client, - String restoredIndexPrefix, - MountSearchableSnapshotRequest.Storage storageType - ) { - this(key, nextStepKey, client, restoredIndexPrefix, storageType, null); + // this isn't directly settable by the user, so validation by assertion is sufficient + assert replicas >= 0 : "number of replicas must be gte zero, but was [" + replicas + "]"; + this.replicas = replicas; } @Override @@ -87,6 +83,10 @@ public Integer getTotalShardsPerNode() { return totalShardsPerNode; } + public int getReplicas() { + return replicas; + } + @Override void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentClusterState, ActionListener listener) { String indexName = indexMetadata.getIndex().getName(); @@ -162,11 +162,13 @@ void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentCl } final Settings.Builder settingsBuilder = Settings.builder(); - overrideTierPreference(this.getKey().phase()).ifPresent(override -> settingsBuilder.put(DataTier.TIER_PREFERENCE, override)); if (totalShardsPerNode != null) { settingsBuilder.put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), totalShardsPerNode); } + if (replicas > 0) { + settingsBuilder.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas); + } final MountSearchableSnapshotRequest mountSearchableSnapshotRequest = new MountSearchableSnapshotRequest( TimeValue.MAX_VALUE, @@ -245,7 +247,7 @@ String[] ignoredIndexSettings() { @Override public int hashCode() { - return Objects.hash(super.hashCode(), restoredIndexPrefix, storageType, totalShardsPerNode); + return Objects.hash(super.hashCode(), restoredIndexPrefix, storageType, totalShardsPerNode, replicas); } @Override @@ -260,6 +262,7 @@ public boolean equals(Object obj) { return super.equals(obj) && Objects.equals(restoredIndexPrefix, other.restoredIndexPrefix) && Objects.equals(storageType, other.storageType) - && Objects.equals(totalShardsPerNode, other.totalShardsPerNode); + && Objects.equals(totalShardsPerNode, other.totalShardsPerNode) + && Objects.equals(replicas, other.replicas); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java index e3719d57ca25c..aaaaf9943a611 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Priority; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; import static org.elasticsearch.xpack.core.ilm.LifecycleOperationMetadata.currentILMMode; import static org.elasticsearch.xpack.core.ilm.LifecycleOperationMetadata.currentSLMMode; @@ -143,7 +144,10 @@ private ClusterState updateSLMState(final 
ClusterState currentState) { @Override public void onFailure(Exception e) { - logger.error("unable to update lifecycle metadata with new ilm mode [" + ilmMode + "], slm mode [" + slmMode + "]", e); + logger.error( + () -> Strings.format("unable to update lifecycle metadata with new ilm mode [%s], slm mode [%s]", ilmMode, slmMode), + e + ); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReadOnlyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReadOnlyAction.java index 117abecafeab3..b36156842acf5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReadOnlyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReadOnlyAction.java @@ -17,7 +17,6 @@ import java.io.IOException; import java.time.Instant; -import java.util.Arrays; import java.util.List; /** @@ -68,11 +67,10 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { WaitUntilTimeSeriesEndTimePassesStep waitUntilTimeSeriesEndTimeStep = new WaitUntilTimeSeriesEndTimePassesStep( waitTimeSeriesEndTimePassesKey, readOnlyKey, - Instant::now, - client + Instant::now ); ReadOnlyStep readOnlyStep = new ReadOnlyStep(readOnlyKey, nextStepKey, client); - return Arrays.asList(checkNotWriteIndexStep, waitUntilTimeSeriesEndTimeStep, readOnlyStep); + return List.of(checkNotWriteIndexStep, waitUntilTimeSeriesEndTimeStep, readOnlyStep); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java index 515941bce841a..f3c72004d6cc9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Arrays; import java.util.List; import java.util.Objects; @@ -172,7 +171,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) client, INDEXING_COMPLETE ); - return Arrays.asList(waitForRolloverReadyStep, rolloverStep, waitForActiveShardsStep, updateDateStep, setIndexingCompleteStep); + return List.of(waitForRolloverReadyStep, rolloverStep, waitForActiveShardsStep, updateDateStep, setIndexingCompleteStep); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java index f585575534b76..b746ee8ea7c07 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java @@ -113,6 +113,7 @@ public String getSnapshotRepository() { return snapshotRepository; } + @Nullable public Integer getTotalShardsPerNode() { return totalShardsPerNode; } @@ -230,8 +231,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey, XPac WaitUntilTimeSeriesEndTimePassesStep waitUntilTimeSeriesEndTimeStep = new WaitUntilTimeSeriesEndTimePassesStep( waitTimeSeriesEndTimePassesKey, skipGeneratingSnapshotKey, - Instant::now, - client + Instant::now ); // When generating a snapshot, we either jump to the force merge step, or we skip the @@ -318,7 +318,8 @@ public List toSteps(Client client, String phase, StepKey nextStepKey, XPac 
client, getRestoredIndexPrefix(mountSnapshotKey), storageType, - totalShardsPerNode + totalShardsPerNode, + 0 ); WaitForIndexColorStep waitForGreenIndexHealthStep = new WaitForIndexColorStep( waitForGreenRestoredIndexKey, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java index 800ea603ede8c..95ca049740c73 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java @@ -67,10 +67,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, response.getFailedShards(), failures == null ? "n/a" - : Strings.collectionToDelimitedString( - Arrays.stream(failures).map(Strings::toString).collect(Collectors.toList()), - "," - ) + : Strings.collectionToDelimitedString(Arrays.stream(failures).map(Strings::toString).toList(), ",") ); listener.onResponse(true, new Info(-1)); } else { @@ -114,9 +111,7 @@ public boolean equals(Object obj) { return super.equals(obj) && Objects.equals(maxNumSegments, other.maxNumSegments); } - public static class Info implements ToXContentObject { - - private final long numberShardsLeftToMerge; + public record Info(long numberShardsLeftToMerge) implements ToXContentObject { static final ParseField SHARDS_TO_MERGE = new ParseField("shards_left_to_merge"); static final ParseField MESSAGE = new ParseField("message"); @@ -124,19 +119,12 @@ public static class Info implements ToXContentObject { "segment_count_step_info", a -> new Info((long) a[0]) ); + static { PARSER.declareLong(ConstructingObjectParser.constructorArg(), SHARDS_TO_MERGE); PARSER.declareString((i, s) -> {}, MESSAGE); } - public Info(long numberShardsLeftToMerge) { - this.numberShardsLeftToMerge = numberShardsLeftToMerge; - } - - public long getNumberShardsLeftToMerge() { - return numberShardsLeftToMerge; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -150,23 +138,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public int hashCode() { - return Objects.hash(numberShardsLeftToMerge); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - Info other = (Info) obj; - return Objects.equals(numberShardsLeftToMerge, other.numberShardsLeftToMerge); - } - @Override public String toString() { return Strings.toString(this); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java index 376567bc2004c..5f7c1d0c3bf3a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -101,7 +100,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { Settings indexPriority = recoveryPriority == null ? 
NULL_PRIORITY_SETTINGS : Settings.builder().put(IndexMetadata.INDEX_PRIORITY_SETTING.getKey(), recoveryPriority).build(); - return Collections.singletonList(new UpdateSettingsStep(key, nextStepKey, client, indexPriority)); + return List.of(new UpdateSettingsStep(key, nextStepKey, client, indexPriority)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java index 401d87f853360..f7478518613e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java @@ -31,7 +31,6 @@ import java.time.Instant; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; import java.util.stream.Stream; import static org.elasticsearch.xpack.core.ilm.ShrinkIndexNameSupplier.SHRUNKEN_INDEX_PREFIX; @@ -232,8 +231,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) WaitUntilTimeSeriesEndTimePassesStep waitUntilTimeSeriesEndTimeStep = new WaitUntilTimeSeriesEndTimePassesStep( waitTimeSeriesEndTimePassesKey, readOnlyKey, - Instant::now, - client + Instant::now ); ReadOnlyStep readOnlyStep = new ReadOnlyStep(readOnlyKey, checkTargetShardsCountKey, client); CheckTargetShardsCountStep checkTargetShardsCountStep = new CheckTargetShardsCountStep( @@ -329,7 +327,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) allowWriteAfterShrinkStep ); - return steps.filter(Objects::nonNull).collect(Collectors.toList()); + return steps.filter(Objects::nonNull).toList(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java index 48a0e65bddf22..10a4c7086a0cc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java @@ -14,7 +14,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -28,9 +27,6 @@ import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.util.stream.Collectors.toList; /** * Represents the lifecycle of an index from creation to deletion. 
A @@ -52,7 +48,7 @@ public class TimeseriesLifecycleType implements LifecycleType { static final String DELETE_PHASE = "delete"; public static final List ORDERED_VALID_PHASES = List.of(HOT_PHASE, WARM_PHASE, COLD_PHASE, FROZEN_PHASE, DELETE_PHASE); - public static final List ORDERED_VALID_HOT_ACTIONS = Stream.of( + public static final List ORDERED_VALID_HOT_ACTIONS = List.of( SetPriorityAction.NAME, UnfollowAction.NAME, RolloverAction.NAME, @@ -61,8 +57,8 @@ public class TimeseriesLifecycleType implements LifecycleType { ShrinkAction.NAME, ForceMergeAction.NAME, SearchableSnapshotAction.NAME - ).filter(Objects::nonNull).toList(); - public static final List ORDERED_VALID_WARM_ACTIONS = Stream.of( + ); + public static final List ORDERED_VALID_WARM_ACTIONS = List.of( SetPriorityAction.NAME, UnfollowAction.NAME, ReadOnlyAction.NAME, @@ -71,8 +67,8 @@ public class TimeseriesLifecycleType implements LifecycleType { MigrateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME - ).filter(Objects::nonNull).toList(); - public static final List ORDERED_VALID_COLD_ACTIONS = Stream.of( + ); + public static final List ORDERED_VALID_COLD_ACTIONS = List.of( SetPriorityAction.NAME, UnfollowAction.NAME, ReadOnlyAction.NAME, @@ -81,7 +77,7 @@ public class TimeseriesLifecycleType implements LifecycleType { AllocateAction.NAME, MigrateAction.NAME, FreezeAction.NAME - ).filter(Objects::nonNull).toList(); + ); public static final List ORDERED_VALID_FROZEN_ACTIONS = List.of(UnfollowAction.NAME, SearchableSnapshotAction.NAME); public static final List ORDERED_VALID_DELETE_ACTIONS = List.of(WaitForSnapshotAction.NAME, DeleteAction.NAME); @@ -114,7 +110,7 @@ public class TimeseriesLifecycleType implements LifecycleType { // Set of actions that cannot be defined (executed) after the managed index has been mounted as searchable snapshot. // It's ordered to produce consistent error messages which can be unit tested. 
public static final Set ACTIONS_CANNOT_FOLLOW_SEARCHABLE_SNAPSHOT = Collections.unmodifiableSet( - new LinkedHashSet<>(Arrays.asList(ForceMergeAction.NAME, FreezeAction.NAME, ShrinkAction.NAME, DownsampleAction.NAME)) + new LinkedHashSet<>(List.of(ForceMergeAction.NAME, FreezeAction.NAME, ShrinkAction.NAME, DownsampleAction.NAME)) ); private TimeseriesLifecycleType() {} @@ -180,11 +176,11 @@ public static boolean shouldInjectMigrateStepForPhase(Phase phase) { public List getOrderedActions(Phase phase) { Map actions = phase.getActions(); return switch (phase.getName()) { - case HOT_PHASE -> ORDERED_VALID_HOT_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).collect(toList()); - case WARM_PHASE -> ORDERED_VALID_WARM_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).collect(toList()); - case COLD_PHASE -> ORDERED_VALID_COLD_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).collect(toList()); - case FROZEN_PHASE -> ORDERED_VALID_FROZEN_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).collect(toList()); - case DELETE_PHASE -> ORDERED_VALID_DELETE_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).collect(toList()); + case HOT_PHASE -> ORDERED_VALID_HOT_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).toList(); + case WARM_PHASE -> ORDERED_VALID_WARM_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).toList(); + case COLD_PHASE -> ORDERED_VALID_COLD_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).toList(); + case FROZEN_PHASE -> ORDERED_VALID_FROZEN_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).toList(); + case DELETE_PHASE -> ORDERED_VALID_DELETE_ACTIONS.stream().map(actions::get).filter(Objects::nonNull).toList(); default -> throw new IllegalArgumentException("lifecycle type [" + TYPE + "] does not support phase [" + phase.getName() + "]"); }; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java index 31aaba551a3f3..6bb0178f1471e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Arrays; import java.util.List; import java.util.Map; @@ -72,7 +71,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { UnfollowFollowerIndexStep step5 = new UnfollowFollowerIndexStep(unfollowFollowerIndex, openFollowerIndex, client); OpenIndexStep step6 = new OpenIndexStep(openFollowerIndex, waitForYellowStep, client); WaitForIndexColorStep step7 = new WaitForIndexColorStep(waitForYellowStep, nextStepKey, ClusterHealthStatus.YELLOW); - return Arrays.asList(conditionalSkipUnfollowStep, step1, step2, step3, step4, step5, step6, step7); + return List.of(conditionalSkipUnfollowStep, step1, step2, step3, step4, step5, step6, step7); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStep.java index f1fbdde1e9a5d..590890405b8d7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStep.java @@ -22,8 +22,6 @@ 
import java.io.IOException; import java.util.List; import java.util.Map; -import java.util.Objects; -import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; @@ -70,35 +68,25 @@ static void handleResponse(FollowStatsAction.StatsResponses responses, Listener if (conditionMet) { listener.onResponse(true, null); } else { - List shardFollowTaskInfos = unSyncedShardFollowStatuses.stream() + List shardFollowTaskInfos = unSyncedShardFollowStatuses.stream() .map( - status -> new Info.ShardFollowTaskInfo( + status -> new ShardFollowTaskInfo( status.followerIndex(), status.getShardId(), status.leaderGlobalCheckpoint(), status.followerGlobalCheckpoint() ) ) - .collect(Collectors.toList()); + .toList(); listener.onResponse(false, new Info(shardFollowTaskInfos)); } } - static final class Info implements ToXContentObject { + record Info(List shardFollowTaskInfos) implements ToXContentObject { static final ParseField SHARD_FOLLOW_TASKS = new ParseField("shard_follow_tasks"); static final ParseField MESSAGE = new ParseField("message"); - private final List shardFollowTaskInfos; - - Info(List shardFollowTaskInfos) { - this.shardFollowTaskInfos = shardFollowTaskInfos; - } - - List getShardFollowTaskInfos() { - return shardFollowTaskInfos; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -114,85 +102,30 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Info info = (Info) o; - return Objects.equals(shardFollowTaskInfos, info.shardFollowTaskInfos); - } - - @Override - public int hashCode() { - return Objects.hash(shardFollowTaskInfos); - } - @Override public String toString() { return Strings.toString(this); } + } - static final class ShardFollowTaskInfo implements ToXContentObject { - - static final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); - static final ParseField SHARD_ID_FIELD = new ParseField("shard_id"); - static final ParseField LEADER_GLOBAL_CHECKPOINT_FIELD = new ParseField("leader_global_checkpoint"); - static final ParseField FOLLOWER_GLOBAL_CHECKPOINT_FIELD = new ParseField("follower_global_checkpoint"); - - private final String followerIndex; - private final int shardId; - private final long leaderGlobalCheckpoint; - private final long followerGlobalCheckpoint; - - ShardFollowTaskInfo(String followerIndex, int shardId, long leaderGlobalCheckpoint, long followerGlobalCheckpoint) { - this.followerIndex = followerIndex; - this.shardId = shardId; - this.leaderGlobalCheckpoint = leaderGlobalCheckpoint; - this.followerGlobalCheckpoint = followerGlobalCheckpoint; - } - - String getFollowerIndex() { - return followerIndex; - } - - int getShardId() { - return shardId; - } - - long getLeaderGlobalCheckpoint() { - return leaderGlobalCheckpoint; - } - - long getFollowerGlobalCheckpoint() { - return followerGlobalCheckpoint; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); - builder.field(SHARD_ID_FIELD.getPreferredName(), shardId); - builder.field(LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), leaderGlobalCheckpoint); - 
builder.field(FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), followerGlobalCheckpoint); - builder.endObject(); - return builder; - } + record ShardFollowTaskInfo(String followerIndex, int shardId, long leaderGlobalCheckpoint, long followerGlobalCheckpoint) + implements + ToXContentObject { - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ShardFollowTaskInfo that = (ShardFollowTaskInfo) o; - return shardId == that.shardId - && leaderGlobalCheckpoint == that.leaderGlobalCheckpoint - && followerGlobalCheckpoint == that.followerGlobalCheckpoint - && Objects.equals(followerIndex, that.followerIndex); - } + static final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); + static final ParseField SHARD_ID_FIELD = new ParseField("shard_id"); + static final ParseField LEADER_GLOBAL_CHECKPOINT_FIELD = new ParseField("leader_global_checkpoint"); + static final ParseField FOLLOWER_GLOBAL_CHECKPOINT_FIELD = new ParseField("follower_global_checkpoint"); - @Override - public int hashCode() { - return Objects.hash(followerIndex, shardId, leaderGlobalCheckpoint, followerGlobalCheckpoint); - } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); + builder.field(SHARD_ID_FIELD.getPreferredName(), shardId); + builder.field(LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), leaderGlobalCheckpoint); + builder.field(FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), followerGlobalCheckpoint); + builder.endObject(); + return builder; } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotAction.java index 08a884f0b8f3c..2633656d7c30c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -62,7 +61,7 @@ public String getPolicy() { @Override public List toSteps(Client client, String phase, StepKey nextStepKey) { StepKey waitForSnapshotKey = new StepKey(phase, NAME, WaitForSnapshotStep.NAME); - return Collections.singletonList(new WaitForSnapshotStep(waitForSnapshotKey, nextStepKey, client, policy)); + return List.of(new WaitForSnapshotStep(waitForSnapshotKey, nextStepKey, client, policy)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStep.java index 50a7d48672c8e..3e190a26dd961 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStep.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.core.ilm; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; @@ -33,8 +32,8 @@ public class 
WaitUntilTimeSeriesEndTimePassesStep extends AsyncWaitStep { public static final String NAME = "check-ts-end-time-passed"; private final Supplier<Instant> nowSupplier; - public WaitUntilTimeSeriesEndTimePassesStep(StepKey key, StepKey nextStepKey, Supplier<Instant> nowSupplier, Client client) { - super(key, nextStepKey, client); + public WaitUntilTimeSeriesEndTimePassesStep(StepKey key, StepKey nextStepKey, Supplier<Instant> nowSupplier) { + super(key, nextStepKey, null); this.nowSupplier = nowSupplier; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/step/info/AllocationInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/step/info/AllocationInfo.java index 5732f5e72a42f..9f280bd344083 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/step/info/AllocationInfo.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/step/info/AllocationInfo.java @@ -14,19 +14,15 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Objects; /** * Represents the state of an index's shards allocation, including a user friendly message describing the current state. * It allows to transfer the allocation information to {@link org.elasticsearch.xcontent.XContent} using * {@link #toXContent(XContentBuilder, Params)} */ -public class AllocationInfo implements ToXContentObject { - - private final long numberOfReplicas; - private final long numberShardsLeftToAllocate; - private final boolean allShardsActive; - private final String message; +public record AllocationInfo(long numberOfReplicas, long numberShardsLeftToAllocate, boolean allShardsActive, String message) + implements + ToXContentObject { static final ParseField NUMBER_OF_REPLICAS = new ParseField("number_of_replicas"); static final ParseField SHARDS_TO_ALLOCATE = new ParseField("shards_left_to_allocate"); @@ -44,13 +40,6 @@ public class AllocationInfo implements ToXContentObject { PARSER.declareString(ConstructingObjectParser.constructorArg(), MESSAGE); } - public AllocationInfo(long numberOfReplicas, long numberShardsLeftToAllocate, boolean allShardsActive, String message) { - this.numberOfReplicas = numberOfReplicas; - this.numberShardsLeftToAllocate = numberShardsLeftToAllocate; - this.allShardsActive = allShardsActive; - this.message = message; - } - /** * Builds the AllocationInfo representing a cluster state with a routing table that does not have enough shards active for a * particular index.
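A quick aside on the record conversions in the hunks above (a minimal illustrative sketch, not part of the diff; the record name below is hypothetical). A record auto-generates the canonical constructor, component accessors, equals and hashCode that these hunks delete by hand, so the conversions are behavior-preserving except for one detail worth knowing: the deleted AllocationInfo.hashCode() omitted the message field even though equals() compared it, while the generated record hashCode() covers all components, making the two consistent.

import java.util.Objects;

// Hypothetical stand-in for the converted classes such as AllocationInfo above.
record AllocationSnapshot(long numberOfReplicas, long shardsLeftToAllocate, boolean allShardsActive, String message) {}

class RecordBoilerplateDemo {
    public static void main(String[] args) {
        var a = new AllocationSnapshot(1, 0, true, "all shards active");
        var b = new AllocationSnapshot(1, 0, true, "all shards active");
        System.out.println(a.message());                  // generated accessor is message(), not getMessage()
        System.out.println(a.equals(b));                  // true: equals() compares every component
        System.out.println(a.hashCode() == b.hashCode()); // true: hashCode() hashes every component, message included
        System.out.println(Objects.equals(a, b));         // true
    }
}

Callers accordingly switch from getX() to x() accessors, which is exactly what the test hunks later in this diff do (getMessage() -> message(), isComplete() -> complete(), getInformationContext() -> informationContext()).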
@@ -72,22 +61,6 @@ public static AllocationInfo allShardsActiveAllocationInfo(long numReplicas, lon ); } - public long getNumberOfReplicas() { - return numberOfReplicas; - } - - public long getNumberShardsLeftToAllocate() { - return numberShardsLeftToAllocate; - } - - public boolean allShardsActive() { - return allShardsActive; - } - - public String getMessage() { - return message; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -99,26 +72,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public int hashCode() { - return Objects.hash(numberOfReplicas, numberShardsLeftToAllocate, allShardsActive); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - AllocationInfo other = (AllocationInfo) obj; - return Objects.equals(numberOfReplicas, other.numberOfReplicas) - && Objects.equals(numberShardsLeftToAllocate, other.numberShardsLeftToAllocate) - && Objects.equals(message, other.message) - && Objects.equals(allShardsActive, other.allShardsActive); - } - @Override public String toString() { return Strings.toString(this); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/step/info/SingleMessageFieldInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/step/info/SingleMessageFieldInfo.java index 8d7eb8c3d303b..bd23e21d46489 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/step/info/SingleMessageFieldInfo.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/step/info/SingleMessageFieldInfo.java @@ -12,20 +12,13 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Objects; /** * A simple object that allows a `message` field to be transferred to `XContent`. */ -public class SingleMessageFieldInfo implements ToXContentObject { +public record SingleMessageFieldInfo(String message) implements ToXContentObject { - private final String message; - - static final ParseField MESSAGE = new ParseField("message"); - - public SingleMessageFieldInfo(String message) { - this.message = message; - } + private static final ParseField MESSAGE = new ParseField("message"); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { @@ -35,24 +28,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public String getMessage() { - return message; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - SingleMessageFieldInfo that = (SingleMessageFieldInfo) o; - return Objects.equals(message, that.message); - } - - @Override - public int hashCode() { - return Objects.hash(message); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/BaseInferenceActionRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/BaseInferenceActionRequest.java new file mode 100644 index 0000000000000..e426574c52ce6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/BaseInferenceActionRequest.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.inference.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.inference.TaskType; + +import java.io.IOException; + +public abstract class BaseInferenceActionRequest extends ActionRequest { + + public BaseInferenceActionRequest() { + super(); + } + + public BaseInferenceActionRequest(StreamInput in) throws IOException { + super(in); + } + + public abstract boolean isStreaming(); + + public abstract TaskType getTaskType(); + + public abstract String getInferenceEntityId(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java index a19edd5a08162..f88909ba4208e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -54,7 +53,7 @@ public InferenceAction() { super(NAME); } - public static class Request extends ActionRequest { + public static class Request extends BaseInferenceActionRequest { public static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueSeconds(30); public static final ParseField INPUT = new ParseField("input"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionAction.java new file mode 100644 index 0000000000000..8d121463fb465 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionAction.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
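For orientation, a sketch of how the new abstract base class above gets used (not part of the diff; DemoInferenceRequest and its field are hypothetical). A concrete subclass supplies the streaming flag, task type and inference endpoint id on top of the usual ActionRequest plumbing:

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.inference.TaskType;
import org.elasticsearch.xpack.core.inference.action.BaseInferenceActionRequest;

import java.io.IOException;

public class DemoInferenceRequest extends BaseInferenceActionRequest {
    private final String inferenceEntityId;

    public DemoInferenceRequest(String inferenceEntityId) {
        this.inferenceEntityId = inferenceEntityId;
    }

    public DemoInferenceRequest(StreamInput in) throws IOException {
        super(in);
        this.inferenceEntityId = in.readString();
    }

    @Override
    public boolean isStreaming() {
        return false; // this demo request does not stream results
    }

    @Override
    public TaskType getTaskType() {
        return TaskType.COMPLETION;
    }

    @Override
    public String getInferenceEntityId() {
        return inferenceEntityId;
    }

    @Override
    public ActionRequestValidationException validate() {
        return null; // nothing to validate in this sketch
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(inferenceEntityId);
    }
}

InferenceAction.Request (changed above to extend this base class) and the new UnifiedCompletionAction.Request below both follow this shape; the latter additionally rejects a null or empty messages list and any task type other than completion in its validate() method.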
+ */ + +package org.elasticsearch.xpack.core.inference.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class UnifiedCompletionAction extends ActionType<InferenceAction.Response> { + public static final UnifiedCompletionAction INSTANCE = new UnifiedCompletionAction(); + public static final String NAME = "cluster:monitor/xpack/inference/unified"; + + public UnifiedCompletionAction() { + super(NAME); + } + + public static class Request extends BaseInferenceActionRequest { + public static Request parseRequest(String inferenceEntityId, TaskType taskType, TimeValue timeout, XContentParser parser) + throws IOException { + var unifiedRequest = UnifiedCompletionRequest.PARSER.apply(parser, null); + return new Request(inferenceEntityId, taskType, unifiedRequest, timeout); + } + + private final String inferenceEntityId; + private final TaskType taskType; + private final UnifiedCompletionRequest unifiedCompletionRequest; + private final TimeValue timeout; + + public Request(String inferenceEntityId, TaskType taskType, UnifiedCompletionRequest unifiedCompletionRequest, TimeValue timeout) { + this.inferenceEntityId = Objects.requireNonNull(inferenceEntityId); + this.taskType = Objects.requireNonNull(taskType); + this.unifiedCompletionRequest = Objects.requireNonNull(unifiedCompletionRequest); + this.timeout = Objects.requireNonNull(timeout); + } + + public Request(StreamInput in) throws IOException { + super(in); + this.inferenceEntityId = in.readString(); + this.taskType = TaskType.fromStream(in); + this.unifiedCompletionRequest = new UnifiedCompletionRequest(in); + this.timeout = in.readTimeValue(); + } + + public TaskType getTaskType() { + return taskType; + } + + public String getInferenceEntityId() { + return inferenceEntityId; + } + + public UnifiedCompletionRequest getUnifiedCompletionRequest() { + return unifiedCompletionRequest; + } + + /** + * The Unified API only supports streaming so we always return true here.
+ * @return true + */ + public boolean isStreaming() { + return true; + } + + public TimeValue getTimeout() { + return timeout; + } + + @Override + public ActionRequestValidationException validate() { + if (unifiedCompletionRequest == null || unifiedCompletionRequest.messages() == null) { + var e = new ActionRequestValidationException(); + e.addValidationError("Field [messages] cannot be null"); + return e; + } + + if (unifiedCompletionRequest.messages().isEmpty()) { + var e = new ActionRequestValidationException(); + e.addValidationError("Field [messages] cannot be an empty array"); + return e; + } + + if (taskType.isAnyOrSame(TaskType.COMPLETION) == false) { + var e = new ActionRequestValidationException(); + e.addValidationError("Field [taskType] must be [completion]"); + return e; + } + + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(inferenceEntityId); + taskType.writeTo(out); + unifiedCompletionRequest.writeTo(out); + out.writeTimeValue(timeout); + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(inferenceEntityId, request.inferenceEntityId) + && taskType == request.taskType + && Objects.equals(unifiedCompletionRequest, request.unifiedCompletionRequest) + && Objects.equals(timeout, request.timeout); + } + + @Override + public int hashCode() { + return Objects.hash(inferenceEntityId, taskType, unifiedCompletionRequest, timeout); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/StreamingUnifiedChatCompletionResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/StreamingUnifiedChatCompletionResults.java new file mode 100644 index 0000000000000..90038c67036c4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/StreamingUnifiedChatCompletionResults.java @@ -0,0 +1,329 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.inference.results; + +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.inference.InferenceResults; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xcontent.ToXContent; + +import java.io.IOException; +import java.util.Collections; +import java.util.Deque; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Flow; + +/** + * Chat Completion results that only contain a Flow.Publisher. 
+ */ +public record StreamingUnifiedChatCompletionResults(Flow.Publisher<? extends ChunkedToXContent> publisher) + implements + InferenceServiceResults { + + public static final String NAME = "chat_completion_chunk"; + public static final String MODEL_FIELD = "model"; + public static final String OBJECT_FIELD = "object"; + public static final String USAGE_FIELD = "usage"; + public static final String INDEX_FIELD = "index"; + public static final String ID_FIELD = "id"; + public static final String FUNCTION_NAME_FIELD = "name"; + public static final String FUNCTION_ARGUMENTS_FIELD = "arguments"; + public static final String FUNCTION_FIELD = "function"; + public static final String CHOICES_FIELD = "choices"; + public static final String DELTA_FIELD = "delta"; + public static final String CONTENT_FIELD = "content"; + public static final String REFUSAL_FIELD = "refusal"; + public static final String ROLE_FIELD = "role"; + private static final String TOOL_CALLS_FIELD = "tool_calls"; + public static final String FINISH_REASON_FIELD = "finish_reason"; + public static final String COMPLETION_TOKENS_FIELD = "completion_tokens"; + public static final String TOTAL_TOKENS_FIELD = "total_tokens"; + public static final String PROMPT_TOKENS_FIELD = "prompt_tokens"; + public static final String TYPE_FIELD = "type"; + + @Override + public boolean isStreaming() { + return true; + } + + @Override + public List<? extends InferenceResults> transformToCoordinationFormat() { + throw new UnsupportedOperationException("Not implemented"); + } + + @Override + public List<? extends InferenceResults> transformToLegacyFormat() { + throw new UnsupportedOperationException("Not implemented"); + } + + @Override + public Map<String, Object> asMap() { + throw new UnsupportedOperationException("Not implemented"); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException("Not implemented"); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException("Not implemented"); + } + + @Override + public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { + throw new UnsupportedOperationException("Not implemented"); + } + + public record Results(Deque<ChatCompletionChunk> chunks) implements ChunkedToXContent { + @Override + public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { + return Iterators.concat(Iterators.flatMap(chunks.iterator(), c -> c.toXContentChunked(params))); + } + } + + public static class ChatCompletionChunk implements ChunkedToXContent { + private final String id; + + public String getId() { + return id; + } + + public List<Choice> getChoices() { + return choices; + } + + public String getModel() { + return model; + } + + public String getObject() { + return object; + } + + public Usage getUsage() { + return usage; + } + + private final List<Choice> choices; + private final String model; + private final String object; + private final ChatCompletionChunk.Usage usage; + + public ChatCompletionChunk(String id, List<Choice> choices, String model, String object, ChatCompletionChunk.Usage usage) { + this.id = id; + this.choices = choices; + this.model = model; + this.object = object; + this.usage = usage; + } + + @Override + public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { + + Iterator<ToXContent> choicesIterator = Collections.emptyIterator(); + if (choices != null) { + choicesIterator = Iterators.concat( + ChunkedToXContentHelper.startArray(CHOICES_FIELD), + Iterators.flatMap(choices.iterator(), c -> c.toXContentChunked(params)), + ChunkedToXContentHelper.endArray() + ); + } + + Iterator<ToXContent> usageIterator = Collections.emptyIterator(); + if (usage != null) { + usageIterator = Iterators.concat( + ChunkedToXContentHelper.startObject(USAGE_FIELD), + ChunkedToXContentHelper.field(COMPLETION_TOKENS_FIELD, usage.completionTokens()), + ChunkedToXContentHelper.field(PROMPT_TOKENS_FIELD, usage.promptTokens()), + ChunkedToXContentHelper.field(TOTAL_TOKENS_FIELD, usage.totalTokens()), + ChunkedToXContentHelper.endObject() + ); + } + + return Iterators.concat( + ChunkedToXContentHelper.startObject(), + ChunkedToXContentHelper.field(ID_FIELD, id), + choicesIterator, + ChunkedToXContentHelper.field(MODEL_FIELD, model), + ChunkedToXContentHelper.field(OBJECT_FIELD, object), + usageIterator, + ChunkedToXContentHelper.endObject() + ); + } + + public record Choice(ChatCompletionChunk.Choice.Delta delta, String finishReason, int index) { + + /* + choices: Array<{ + delta: { ... }; + finish_reason: string | null; + index: number; + }>; + */ + public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { + return Iterators.concat( + ChunkedToXContentHelper.startObject(), + delta.toXContentChunked(params), + ChunkedToXContentHelper.optionalField(FINISH_REASON_FIELD, finishReason), + ChunkedToXContentHelper.field(INDEX_FIELD, index), + ChunkedToXContentHelper.endObject() + ); + } + + public static class Delta { + private final String content; + private final String refusal; + private final String role; + private List<ToolCall> toolCalls; + + public Delta(String content, String refusal, String role, List<ToolCall> toolCalls) { + this.content = content; + this.refusal = refusal; + this.role = role; + this.toolCalls = toolCalls; + } + + /* + delta: { + content?: string | null; + refusal?: string | null; + role?: 'system' | 'user' | 'assistant' | 'tool'; + tool_calls?: Array<{ ... }>; + }; + */ + public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { + var xContent = Iterators.concat( + ChunkedToXContentHelper.startObject(DELTA_FIELD), + ChunkedToXContentHelper.optionalField(CONTENT_FIELD, content), + ChunkedToXContentHelper.optionalField(REFUSAL_FIELD, refusal), + ChunkedToXContentHelper.optionalField(ROLE_FIELD, role) + ); + + if (toolCalls != null && toolCalls.isEmpty() == false) { + xContent = Iterators.concat( + xContent, + ChunkedToXContentHelper.startArray(TOOL_CALLS_FIELD), + Iterators.flatMap(toolCalls.iterator(), t -> t.toXContentChunked(params)), + ChunkedToXContentHelper.endArray() + ); + } + xContent = Iterators.concat(xContent, ChunkedToXContentHelper.endObject()); + return xContent; + + } + + public String getContent() { + return content; + } + + public String getRefusal() { + return refusal; + } + + public String getRole() { + return role; + } + + public List<ToolCall> getToolCalls() { + return toolCalls; + } + + public static class ToolCall { + private final int index; + private final String id; + public ChatCompletionChunk.Choice.Delta.ToolCall.Function function; + private final String type; + + public ToolCall(int index, String id, ChatCompletionChunk.Choice.Delta.ToolCall.Function function, String type) { + this.index = index; + this.id = id; + this.function = function; + this.type = type; + } + + public int getIndex() { + return index; + } + + public String getId() { + return id; + } + + public ChatCompletionChunk.Choice.Delta.ToolCall.Function getFunction() { + return function; + } + + public String getType() { + return type; + } + + /* + index: number; + id?: string; + function?: { + arguments?: string; + name?: string; + }; + type?: 'function'; + */ + public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { + var content = Iterators.concat( +
ChunkedToXContentHelper.startObject(), + ChunkedToXContentHelper.field(INDEX_FIELD, index), + ChunkedToXContentHelper.optionalField(ID_FIELD, id) + ); + + if (function != null) { + content = Iterators.concat( + content, + ChunkedToXContentHelper.startObject(FUNCTION_FIELD), + ChunkedToXContentHelper.optionalField(FUNCTION_ARGUMENTS_FIELD, function.getArguments()), + ChunkedToXContentHelper.optionalField(FUNCTION_NAME_FIELD, function.getName()), + ChunkedToXContentHelper.endObject() + ); + } + + content = Iterators.concat( + content, + ChunkedToXContentHelper.field(TYPE_FIELD, type), + ChunkedToXContentHelper.endObject() + ); + return content; + } + + public static class Function { + private final String arguments; + private final String name; + + public Function(String arguments, String name) { + this.arguments = arguments; + this.name = name; + } + + public String getArguments() { + return arguments; + } + + public String getName() { + return name; + } + } + } + } + } + + public record Usage(int completionTokens, int promptTokens, int totalTokens) {} + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java index 6c49cadb8d189..25f5e0f0617c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java @@ -67,12 +67,12 @@ public final class MachineLearningField { License.OperationMode.PLATINUM ); - // Ideally this would be 7.0.0, but it has to be 6.4.0 because due to an oversight it's impossible + // Ideally this would be 8.3.0, but it has to be 6.4.0 because due to an oversight it's impossible // for the Java code to distinguish the model states for versions 6.4.0 to 7.9.3 inclusive. public static final MlConfigVersion MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION = MlConfigVersion.fromString("6.4.0"); - // We tell the user we support model snapshots newer than 7.0.0 as that's the major version - // boundary, even though behind the scenes we have to support back to 6.4.0. - public static final MlConfigVersion MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION = MlConfigVersion.V_7_0_0; + // We tell the user we support model snapshots newer than 8.3.0 as that's the version with the last format change, + // even though behind the scenes we have to support back to 6.4.0. 
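A side note on the MachineLearningField hunk here (a hypothetical sketch, not part of the diff; it assumes MlConfigVersion exposes the usual before() comparison). The two constants in this hunk form a pair: the checked floor stays at 6.4.0 because older snapshot states are indistinguishable in code, while the reported minimum is what gets quoted to users:

// Hypothetical gate combining the two thresholds in the hunk above.
static void ensureSnapshotSupported(MlConfigVersion snapshotVersion) {
    if (snapshotVersion.before(MachineLearningField.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION)) {
        throw new IllegalArgumentException(
            "model snapshot [" + snapshotVersion + "] is too old; minimum supported version is ["
                + MachineLearningField.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION + "]"
        );
    }
}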
+ public static final MlConfigVersion MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION = MlConfigVersion.V_8_3_0; private MachineLearningField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java index 9164fd88b6395..08e89a0fcab00 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java @@ -492,7 +492,7 @@ private boolean hasMatchAllEquivalent( return false; } - private boolean canMatchShard(ShardId shardId, NodeTermsEnumRequest req) throws IOException { + private boolean canMatchShard(ShardId shardId, NodeTermsEnumRequest req) { if (req.indexFilter() == null || req.indexFilter() instanceof MatchAllQueryBuilder) { return true; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index e889d25cd7a96..d788a0b5abd37 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.core.XPackField; import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -59,6 +60,12 @@ void assertAckMessages(String feature, OperationMode from, OperationMode to, int assertEquals(expectedMessages, gotMessages.length); } + void assertAckMessages(String feature, OperationMode from, OperationMode to, Set expectedMessages) { + String[] gotMessages = XPackLicenseState.ACKNOWLEDGMENT_MESSAGES.get(feature).apply(from, to); + Set actualMessages = Arrays.stream(gotMessages).collect(Collectors.toSet()); + assertThat(actualMessages, equalTo(expectedMessages)); + } + static T randomFrom(T[] values, Predicate filter) { return randomFrom(Arrays.stream(values).filter(filter).collect(Collectors.toList())); } @@ -143,6 +150,16 @@ public void testCcrAckTrialOrPlatinumToNotTrialOrPlatinum() { assertAckMessages(XPackField.CCR, randomTrialOrPlatinumMode(), randomBasicStandardOrGold(), 1); } + public void testEsqlAckToTrialOrPlatinum() { + assertAckMessages(XPackField.ESQL, randomMode(), randomFrom(TRIAL, ENTERPRISE), 0); + } + + public void testEsqlAckTrialOrEnterpriseToNotTrialOrEnterprise() { + for (OperationMode to : List.of(BASIC, STANDARD, GOLD, PLATINUM)) { + assertAckMessages(XPackField.ESQL, randomFrom(TRIAL, ENTERPRISE), to, Set.of("ES|QL cross-cluster search will be disabled.")); + } + } + public void testExpiredLicense() { // use standard feature which would normally be allowed at all license levels LicensedFeature feature = LicensedFeature.momentary("family", "enterpriseFeature", STANDARD); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java index 1fc0afafde353..c5a8185f8511b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocateActionTests.java @@ -14,7 +14,6 @@ import 
org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ilm.Step.StepKey; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -44,20 +43,20 @@ static AllocateAction randomInstance() { includes = randomAllocationRoutingMap(1, 100); hasAtLeastOneMap = true; } else { - includes = randomBoolean() ? null : Collections.emptyMap(); + includes = randomBoolean() ? null : Map.of(); } Map excludes; if (randomBoolean()) { hasAtLeastOneMap = true; excludes = randomAllocationRoutingMap(1, 100); } else { - excludes = randomBoolean() ? null : Collections.emptyMap(); + excludes = randomBoolean() ? null : Map.of(); } Map requires; if (hasAtLeastOneMap == false || randomBoolean()) { requires = randomAllocationRoutingMap(1, 100); } else { - requires = randomBoolean() ? null : Collections.emptyMap(); + requires = randomBoolean() ? null : Map.of(); } Integer numberOfReplicas = randomBoolean() ? null : randomIntBetween(0, 10); Integer totalShardsPerNode = randomBoolean() ? null : randomIntBetween(-1, 10); @@ -97,9 +96,9 @@ protected AllocateAction mutateInstance(AllocateAction instance) { } public void testAllMapsNullOrEmpty() { - Map include = randomBoolean() ? null : Collections.emptyMap(); - Map exclude = randomBoolean() ? null : Collections.emptyMap(); - Map require = randomBoolean() ? null : Collections.emptyMap(); + Map include = randomBoolean() ? null : Map.of(); + Map exclude = randomBoolean() ? null : Map.of(); + Map require = randomBoolean() ? null : Map.of(); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> new AllocateAction(null, null, include, exclude, require) @@ -124,8 +123,8 @@ public void testAllMapsNullOrEmpty() { public void testInvalidNumberOfReplicas() { Map include = randomAllocationRoutingMap(1, 5); - Map exclude = randomBoolean() ? null : Collections.emptyMap(); - Map require = randomBoolean() ? null : Collections.emptyMap(); + Map exclude = randomBoolean() ? null : Map.of(); + Map require = randomBoolean() ? null : Map.of(); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> new AllocateAction(randomIntBetween(-1000, -1), randomIntBetween(0, 300), include, exclude, require) @@ -135,8 +134,8 @@ public void testInvalidNumberOfReplicas() { public void testInvalidTotalShardsPerNode() { Map include = randomAllocationRoutingMap(1, 5); - Map exclude = randomBoolean() ? null : Collections.emptyMap(); - Map require = randomBoolean() ? null : Collections.emptyMap(); + Map exclude = randomBoolean() ? null : Map.of(); + Map require = randomBoolean() ? 
null : Map.of(); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> new AllocateAction(randomIntBetween(0, 300), randomIntBetween(-1000, -2), include, exclude, require) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java index 415014623f340..708c3630b8b8a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.core.ilm.ClusterStateWaitStep.Result; import org.elasticsearch.xpack.core.ilm.Step.StepKey; -import java.util.Collections; import java.util.Map; import static org.elasticsearch.cluster.routing.TestShardRouting.buildUnassignedInfo; @@ -109,7 +108,7 @@ public void testConditionMet() { public void testRequireConditionMetOnlyOneCopyAllocated() { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); - Map requires = Collections.singletonMap(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "foo", "bar"); + Map requires = Map.of(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "foo", "bar"); Settings.Builder existingSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); @@ -181,13 +180,13 @@ public void testClusterExcludeFiltersConditionMetOnlyOneCopyAllocated() { Result actualResult = step.isConditionMet(index, clusterState); Result expectedResult = new ClusterStateWaitStep.Result(false, allShardsActiveAllocationInfo(1, 1)); - assertEquals(expectedResult.isComplete(), actualResult.isComplete()); - assertEquals(expectedResult.getInformationContext(), actualResult.getInformationContext()); + assertEquals(expectedResult.complete(), actualResult.complete()); + assertEquals(expectedResult.informationContext(), actualResult.informationContext()); } public void testExcludeConditionMetOnlyOneCopyAllocated() { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); - Map excludes = Collections.singletonMap(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "foo", "bar"); + Map excludes = Map.of(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "foo", "bar"); Settings.Builder existingSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); @@ -218,7 +217,7 @@ public void testExcludeConditionMetOnlyOneCopyAllocated() { public void testIncludeConditionMetOnlyOneCopyAllocated() { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); - Map includes = Collections.singletonMap(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "foo", "bar"); + Map includes = Map.of(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "foo", "bar"); Settings.Builder existingSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); @@ -495,8 +494,8 @@ public void testExecuteIndexMissing() throws Exception { AllocationRoutedStep step = createRandomInstance(); Result actualResult = step.isConditionMet(index, clusterState); - 
assertFalse(actualResult.isComplete()); - assertNull(actualResult.getInformationContext()); + assertFalse(actualResult.complete()); + assertNull(actualResult.informationContext()); } private void assertAllocateStatus( @@ -537,7 +536,7 @@ private void assertAllocateStatus( .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) .build(); Result actualResult = step.isConditionMet(index, clusterState); - assertEquals(expectedResult.isComplete(), actualResult.isComplete()); - assertEquals(expectedResult.getInformationContext(), actualResult.getInformationContext()); + assertEquals(expectedResult.complete(), actualResult.complete()); + assertEquals(expectedResult.informationContext(), actualResult.informationContext()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java index af9aa0982d61d..54c6ceb814af8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java @@ -59,8 +59,8 @@ public void testStepCompleteIfIndexIsNotPartOfDataStream() { .build(); ClusterStateWaitStep.Result result = createRandomInstance().isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), is(nullValue())); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), is(nullValue())); } public void testStepIncompleteIfIndexIsTheDataStreamWriteIndex() { @@ -94,10 +94,10 @@ public void testStepIncompleteIfIndexIsTheDataStreamWriteIndex() { IndexMetadata indexToOperateOn = useFailureStore ? failureIndexMetadata : indexMetadata; String expectedIndexName = indexToOperateOn.getIndex().getName(); ClusterStateWaitStep.Result result = createRandomInstance().isConditionMet(indexToOperateOn.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); + assertThat(result.complete(), is(false)); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.informationContext(); assertThat( - info.getMessage(), + info.message(), is( "index [" + expectedIndexName @@ -161,7 +161,7 @@ public void testStepCompleteIfPartOfDataStreamButNotWriteIndex() { boolean useFailureStore = randomBoolean(); IndexMetadata indexToOperateOn = useFailureStore ? 
failureIndexMetadata : indexMetadata; ClusterStateWaitStep.Result result = createRandomInstance().isConditionMet(indexToOperateOn.getIndex(), clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), is(nullValue())); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), is(nullValue())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java index 371f7def67c52..72bf7cedb2fb9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java @@ -29,9 +29,9 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.node.Node; -import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import static org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type.SIGTERM; import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; @@ -340,7 +340,7 @@ public void testExecuteAllocateReplicaUnassigned() { */ public void testExecuteReplicasNotAllocatedOnSingleNode() { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); - Map requires = Collections.singletonMap("_id", "node1"); + Map requires = Map.of("_id", "node1"); Settings.Builder existingSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1") @@ -376,7 +376,7 @@ public void testExecuteReplicasNotAllocatedOnSingleNode() { public void testExecuteReplicasButCopiesNotPresent() { Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); - Map requires = Collections.singletonMap("_id", "node1"); + Map requires = Map.of("_id", "node1"); Settings.Builder existingSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1") @@ -417,8 +417,8 @@ public void testExecuteIndexMissing() throws Exception { CheckShrinkReadyStep step = createRandomInstance(); ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); - assertFalse(actualResult.isComplete()); - assertNull(actualResult.getInformationContext()); + assertFalse(actualResult.complete()); + assertNull(actualResult.informationContext()); } public void testStepCompletableIfAllShardsActive() { @@ -458,7 +458,7 @@ public void testStepCompletableIfAllShardsActive() { .putCustom( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( - Collections.singletonMap( + Map.of( "node1", SingleNodeShutdownMetadata.builder() .setType(type) @@ -495,7 +495,7 @@ public void testStepCompletableIfAllShardsActive() { .build(); assertTrue(step.isCompletable()); ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); - assertTrue(actualResult.isComplete()); + assertTrue(actualResult.complete()); assertTrue(step.isCompletable()); } } @@ -537,7 +537,7 @@ public void testStepBecomesUncompletable() { .putCustom( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( - Collections.singletonMap( + Map.of( "node1", SingleNodeShutdownMetadata.builder() .setType(type) @@ -574,9 +574,9 @@ public void 
testStepBecomesUncompletable() { .build(); assertTrue(step.isCompletable()); ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); - assertFalse(actualResult.isComplete()); + assertFalse(actualResult.complete()); assertThat( - Strings.toString(actualResult.getInformationContext()), + Strings.toString(actualResult.informationContext()), containsString("node with id [node1] is currently marked as shutting down") ); assertFalse(step.isCompletable()); @@ -625,8 +625,8 @@ private void assertAllocateStatus( .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) .build(); ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); - assertEquals(expectedResult.isComplete(), actualResult.isComplete()); - assertEquals(expectedResult.getInformationContext(), actualResult.getInformationContext()); + assertEquals(expectedResult.complete(), actualResult.complete()); + assertEquals(expectedResult.informationContext(), actualResult.informationContext()); } public static UnassignedInfo randomUnassignedInfo(String message) { @@ -649,7 +649,7 @@ public static UnassignedInfo randomUnassignedInfo(String message) { System.currentTimeMillis(), delayed, UnassignedInfo.AllocationStatus.NO_ATTEMPT, - Collections.emptySet(), + Set.of(), lastAllocatedNodeId ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStepTests.java index 8eb8d0f395aba..23d24fbd28730 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStepTests.java @@ -56,7 +56,7 @@ public void testStepCompleteIfTargetShardsCountIsValid() { CheckTargetShardsCountStep checkTargetShardsCountStep = new CheckTargetShardsCountStep(randomStepKey(), randomStepKey(), 2); ClusterStateWaitStep.Result result = checkTargetShardsCountStep.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(true)); + assertThat(result.complete(), is(true)); } public void testStepIncompleteIfTargetShardsCountNotValid() { @@ -75,10 +75,10 @@ public void testStepIncompleteIfTargetShardsCountNotValid() { CheckTargetShardsCountStep checkTargetShardsCountStep = new CheckTargetShardsCountStep(randomStepKey(), randomStepKey(), 3); ClusterStateWaitStep.Result result = checkTargetShardsCountStep.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); + assertThat(result.complete(), is(false)); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.informationContext(); assertThat( - info.getMessage(), + info.message(), is( "lifecycle action of policy [" + policyName diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseFollowerIndexStepTests.java index 7ce078826b49a..ef7325be0a496 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseFollowerIndexStepTests.java @@ -13,7 +13,8 @@ import org.elasticsearch.index.IndexVersion; import org.mockito.Mockito; -import 
java.util.Collections; +import java.util.List; +import java.util.Map; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -24,7 +25,7 @@ public class CloseFollowerIndexStepTests extends AbstractStepTestCase listener = (ActionListener) invocation.getArguments()[1]; - listener.onResponse(new CloseIndexResponse(true, true, Collections.emptyList())); + listener.onResponse(new CloseIndexResponse(true, true, List.of())); return null; }).when(indicesClient).close(Mockito.any(), Mockito.any()); @@ -54,7 +55,7 @@ public void testRequestNotAcknowledged() { assertThat(closeIndexRequest.indices()[0], equalTo("follower-index")); @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; - listener.onResponse(new CloseIndexResponse(false, false, Collections.emptyList())); + listener.onResponse(new CloseIndexResponse(false, false, List.of())); return null; }).when(indicesClient).close(Mockito.any(), Mockito.any()); @@ -85,7 +86,7 @@ public void testCloseFollowingIndexFailed() { public void testCloseFollowerIndexIsNoopForAlreadyClosedIndex() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .state(IndexMetadata.State.CLOSE) .numberOfShards(1) .numberOfReplicas(0) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseIndexStepTests.java index 02fb49ac71adf..b546aeaa20687 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CloseIndexStepTests.java @@ -20,7 +20,7 @@ import org.mockito.Mockito; import org.mockito.stubbing.Answer; -import java.util.Collections; +import java.util.List; import static org.hamcrest.Matchers.equalTo; @@ -77,9 +77,7 @@ public void testPerformAction() { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; assertThat(request.indices(), equalTo(new String[] { indexMetadata.getIndex().getName() })); - listener.onResponse( - new CloseIndexResponse(true, true, Collections.singletonList(new CloseIndexResponse.IndexResult(indexMetadata.getIndex()))) - ); + listener.onResponse(new CloseIndexResponse(true, true, List.of(new CloseIndexResponse.IndexResult(indexMetadata.getIndex())))); return null; }).when(indicesClient).close(Mockito.any(), Mockito.any()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java index ea583b51c4c28..eeddda4199665 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java @@ -20,7 +20,6 @@ import java.time.Clock; import java.time.Instant; import java.time.ZoneId; -import java.util.Collections; import java.util.Map; import java.util.UUID; @@ -70,8 +69,8 @@ public void testIndexIsMissingReturnsIncompleteResult() { new Index("testName", 
UUID.randomUUID().toString()), ClusterState.EMPTY_STATE ); - assertThat(result.isComplete(), is(false)); - assertThat(result.getInformationContext(), nullValue()); + assertThat(result.complete(), is(false)); + assertThat(result.informationContext(), nullValue()); } public void testIsConditionMetForUnderlyingStep() { @@ -83,7 +82,7 @@ public void testIsConditionMetForUnderlyingStep() { .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "480h") ) .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(System.currentTimeMillis()))) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -95,8 +94,8 @@ public void testIsConditionMetForUnderlyingStep() { ClusterStateWaitUntilThresholdStep underTest = new ClusterStateWaitUntilThresholdStep(stepToExecute, randomStepKey()); ClusterStateWaitStep.Result result = underTest.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), nullValue()); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), nullValue()); } { @@ -107,7 +106,7 @@ public void testIsConditionMetForUnderlyingStep() { .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "48h") ) .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(System.currentTimeMillis()))) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -120,10 +119,10 @@ public void testIsConditionMetForUnderlyingStep() { ClusterStateWaitUntilThresholdStep underTest = new ClusterStateWaitUntilThresholdStep(stepToExecute, randomStepKey()); ClusterStateWaitStep.Result result = underTest.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); - assertThat(result.getInformationContext(), notNullValue()); + assertThat(result.complete(), is(false)); + assertThat(result.informationContext(), notNullValue()); WaitForIndexingCompleteStep.IndexingNotCompleteInfo info = (WaitForIndexingCompleteStep.IndexingNotCompleteInfo) result - .getInformationContext(); + .informationContext(); assertThat( info.getMessage(), equalTo( @@ -140,7 +139,7 @@ public void testIsConditionMetForUnderlyingStep() { settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true") .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "1s") ) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(1234L))) .numberOfShards(1) .numberOfReplicas(0) @@ -154,8 +153,8 @@ public void testIsConditionMetForUnderlyingStep() { ClusterStateWaitUntilThresholdStep underTest = new ClusterStateWaitUntilThresholdStep(stepToExecute, nextKeyOnThresholdBreach); ClusterStateWaitStep.Result result = underTest.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), nullValue()); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), nullValue()); assertThat(underTest.getNextStepKey(), is(not(nextKeyOnThresholdBreach))); assertThat(underTest.getNextStepKey(), is(stepToExecute.getNextStepKey())); } @@ -168,7 +167,7 @@ public void testIsConditionMetForUnderlyingStep() { 
settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "false") .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "1h") ) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(1234L))) .numberOfShards(1) .numberOfReplicas(0) @@ -184,11 +183,11 @@ public void testIsConditionMetForUnderlyingStep() { ClusterStateWaitUntilThresholdStep underTest = new ClusterStateWaitUntilThresholdStep(stepToExecute, nextKeyOnThresholdBreach); ClusterStateWaitStep.Result result = underTest.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), notNullValue()); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), notNullValue()); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.informationContext(); assertThat( - info.getMessage(), + info.message(), equalTo( "[" + currentStepKey.name() @@ -267,7 +266,7 @@ public boolean isRetryable() { new StepKey("phase", "action", "breached") ); - assertFalse(step.isConditionMet(indexMetadata.getIndex(), clusterState).isComplete()); + assertFalse(step.isConditionMet(indexMetadata.getIndex(), clusterState).complete()); assertThat(step.getNextStepKey().name(), equalTo("next-key")); @@ -290,7 +289,7 @@ public boolean isRetryable() { }, new StepKey("phase", "action", "breached") ); - assertTrue(step.isConditionMet(indexMetadata.getIndex(), clusterState).isComplete()); + assertTrue(step.isConditionMet(indexMetadata.getIndex(), clusterState).complete()); assertThat(step.getNextStepKey().name(), equalTo("breached")); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStepTests.java index 95c1f5c4aa96b..51d1464ed5525 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStepTests.java @@ -89,8 +89,8 @@ public void testExecuteWithUnassignedShard() { Result expectedResult = new Result(false, waitingForActiveShardsAllocationInfo(1)); Result actualResult = step.isConditionMet(index, clusterState); - assertThat(actualResult.isComplete(), is(false)); - assertThat(actualResult.getInformationContext(), is(expectedResult.getInformationContext())); + assertThat(actualResult.complete(), is(false)); + assertThat(actualResult.informationContext(), is(expectedResult.informationContext())); } public void testExecuteWithPendingShards() { @@ -129,8 +129,8 @@ public void testExecuteWithPendingShards() { ); Result actualResult = step.isConditionMet(index, clusterState); - assertThat(actualResult.isComplete(), is(false)); - assertThat(actualResult.getInformationContext(), is(expectedResult.getInformationContext())); + assertThat(actualResult.complete(), is(false)); + assertThat(actualResult.informationContext(), is(expectedResult.informationContext())); } public void testExecuteWithPendingShardsAndTargetRoleNotPresentInCluster() { @@ -163,8 +163,8 @@ public void testExecuteWithPendingShardsAndTargetRoleNotPresentInCluster() { ); Result actualResult = step.isConditionMet(index, clusterState); - 
assertThat(actualResult.isComplete(), is(false)); - assertThat(actualResult.getInformationContext(), is(expectedResult.getInformationContext())); + assertThat(actualResult.complete(), is(false)); + assertThat(actualResult.informationContext(), is(expectedResult.informationContext())); } public void testExecuteIndexMissing() { @@ -174,8 +174,8 @@ public void testExecuteIndexMissing() { DataTierMigrationRoutedStep step = createRandomInstance(); Result actualResult = step.isConditionMet(index, clusterState); - assertThat(actualResult.isComplete(), is(false)); - assertThat(actualResult.getInformationContext(), is(nullValue())); + assertThat(actualResult.complete(), is(false)); + assertThat(actualResult.informationContext(), is(nullValue())); } public void testExecuteIsComplete() { @@ -199,8 +199,8 @@ public void testExecuteIsComplete() { .build(); DataTierMigrationRoutedStep step = createRandomInstance(); Result result = step.isConditionMet(index, clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), is(nullValue())); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), is(nullValue())); } public void testExecuteWithGenericDataNodes() { @@ -220,8 +220,8 @@ public void testExecuteWithGenericDataNodes() { .build(); DataTierMigrationRoutedStep step = createRandomInstance(); Result result = step.isConditionMet(index, clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), is(nullValue())); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), is(nullValue())); } public void testExecuteForIndexWithoutTierRoutingInformationWaitsForReplicasToBeActive() { @@ -245,8 +245,8 @@ public void testExecuteForIndexWithoutTierRoutingInformationWaitsForReplicasToBe Result expectedResult = new Result(false, waitingForActiveShardsAllocationInfo(1)); Result result = step.isConditionMet(index, clusterState); - assertThat(result.isComplete(), is(false)); - assertThat(result.getInformationContext(), is(expectedResult.getInformationContext())); + assertThat(result.complete(), is(false)); + assertThat(result.informationContext(), is(expectedResult.informationContext())); } { @@ -266,8 +266,8 @@ public void testExecuteForIndexWithoutTierRoutingInformationWaitsForReplicasToBe DataTierMigrationRoutedStep step = createRandomInstance(); Result result = step.isConditionMet(index, clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), is(nullValue())); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), is(nullValue())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java index 937502281b64d..c4138d228719e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponseTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -81,7 +80,7 @@ protected boolean assertToXContentEquivalence() { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList(new 
NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) + List.of(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeActionTests.java index aecf029a1357a..b8d480200fb5d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeActionTests.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.List; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; @@ -109,7 +108,7 @@ private void assertBestCompression(ForceMergeAction instance) { // available .skip(1) .map(s -> new Tuple<>(s.getKey(), s.getNextStepKey())) - .collect(Collectors.toList()); + .toList(); StepKey checkNotWriteIndex = new StepKey(phase, ForceMergeAction.NAME, CheckNotDataStreamWriteIndexStep.NAME); StepKey waitTimeSeriesEndTimePassesKey = new StepKey(phase, ForceMergeAction.NAME, WaitUntilTimeSeriesEndTimePassesStep.NAME); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java index bee6351582bc9..908e7b764f136 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; -import java.util.Collections; +import java.util.List; import java.util.Locale; import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; @@ -82,7 +82,7 @@ private void testPerformAction(String policyName, String expectedPolicyName) { .metadata( Metadata.builder() .put(indexMetadata, false) - .putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(repo))) + .putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(List.of(repo))) .build() ) .build(); @@ -167,7 +167,7 @@ public void testPerformActionWillOverwriteCachedRepository() { .metadata( Metadata.builder() .put(indexMetadata, false) - .putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(repo))) + .putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(List.of(repo))) .build() ) .build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java index ea3c9cc5926ab..6fc98d4c2c728 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; +import java.util.List; import java.util.Objects; import java.util.function.Supplier; @@ -292,7 +292,7 @@ protected IndexLifecycleExplainResponse mutateInstance(IndexLifecycleExplainResp protected 
NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) + List.of(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java index 753edfbe334b9..7dd6bfd209660 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.mockito.Mockito; -import java.util.Collections; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -56,7 +55,7 @@ public void testExecuteWithHeadersAsyncNoHeaders() throws InterruptedException { SearchRequest request = new SearchRequest("foo"); - final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, Collections.emptyMap()); + final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, Map.of()); policyClient.execute(TransportSearchAction.TYPE, request, listener); latch.await(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java index 3e9fd0105feae..b58d7184f741c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -37,7 +36,7 @@ public void setup() { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList( + List.of( new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new), new NamedWriteableRegistry.Entry( LifecycleType.class, @@ -65,7 +64,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { protected NamedXContentRegistry xContentRegistry() { List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); entries.addAll( - Arrays.asList( + List.of( new NamedXContentRegistry.Entry( LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java index 7963d04e0f666..1bea0ac6d192c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java @@ -20,8 +20,6 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; @@ -30,7 +28,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Function; -import 
java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -48,7 +45,7 @@ protected LifecyclePolicy doParseInstance(XContentParser parser) { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList( + List.of( new NamedWriteableRegistry.Entry( LifecycleType.class, TimeseriesLifecycleType.TYPE, @@ -75,7 +72,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { protected NamedXContentRegistry xContentRegistry() { List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); entries.addAll( - Arrays.asList( + List.of( new NamedXContentRegistry.Entry( LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), @@ -150,18 +147,8 @@ public static LifecyclePolicy randomTimeseriesLifecyclePolicy(@Nullable String l ).stream() // Remove the frozen phase, we'll randomly re-add it later .filter(pn -> TimeseriesLifecycleType.FROZEN_PHASE.equals(pn) == false) - .collect(Collectors.toList()); - Map phases = Maps.newMapWithExpectedSize(phaseNames.size()); - Function> validActions = getPhaseToValidActions(); - Function randomAction = getNameToActionFunction(); - // as what actions end up in the hot phase influence what actions are allowed in the subsequent phases we'll move the hot phase - // at the front of the phases to process (if it exists) - if (phaseNames.contains(TimeseriesLifecycleType.HOT_PHASE)) { - phaseNames.remove(TimeseriesLifecycleType.HOT_PHASE); - phaseNames.add(0, TimeseriesLifecycleType.HOT_PHASE); - } - boolean hotPhaseContainsSearchableSnap = false; - boolean coldPhaseContainsSearchableSnap = false; + .toList(); + // let's order the phases so we can reason about actions in a previous phase in order to generate a random *valid* policy List orderedPhases = new ArrayList<>(phaseNames.size()); for (String validPhase : TimeseriesLifecycleType.ORDERED_VALID_PHASES) { @@ -170,6 +157,12 @@ public static LifecyclePolicy randomTimeseriesLifecyclePolicy(@Nullable String l } } + Map phases = Maps.newMapWithExpectedSize(phaseNames.size()); + Function> validActions = getPhaseToValidActions(); + Function randomAction = getNameToActionFunction(); + boolean hotPhaseContainsSearchableSnap = false; + boolean coldPhaseContainsSearchableSnap = false; + TimeValue prev = null; for (String phase : orderedPhases) { TimeValue after = prev == null @@ -222,7 +215,7 @@ public static LifecyclePolicy randomTimeseriesLifecyclePolicy(@Nullable String l new Phase( TimeseriesLifecycleType.FROZEN_PHASE, frozenTime, - Collections.singletonMap( + Map.of( SearchableSnapshotAction.NAME, new SearchableSnapshotAction( randomAlphaOfLength(10), @@ -304,11 +297,11 @@ protected LifecyclePolicy mutateInstance(LifecyclePolicy instance) { () -> randomFrom( TimeseriesLifecycleType.ORDERED_VALID_PHASES.stream() .filter(pn -> TimeseriesLifecycleType.FROZEN_PHASE.equals(pn) == false) - .collect(Collectors.toList()) + .toList() ) ); phases = new LinkedHashMap<>(phases); - phases.put(phaseName, new Phase(phaseName, null, Collections.emptyMap())); + phases.put(phaseName, new Phase(phaseName, null, Map.of())); } case 2 -> metadata = randomValueOtherThan(metadata, LifecyclePolicyTests::randomMeta); case 3 -> deprecated = instance.isDeprecated() ? 
randomFrom(false, null) : true; @@ -341,8 +334,8 @@ public void testToStepsWithOneStep() { lifecycleName = randomAlphaOfLengthBetween(1, 20); Map phases = new LinkedHashMap<>(); - LifecycleAction firstAction = new MockAction(Arrays.asList(mockStep)); - Map actions = Collections.singletonMap(MockAction.NAME, firstAction); + LifecycleAction firstAction = new MockAction(List.of(mockStep)); + Map actions = Map.of(MockAction.NAME, firstAction); Phase firstPhase = new Phase("test", TimeValue.ZERO, actions); phases.put(firstPhase.getName(), firstPhase); LifecyclePolicy policy = new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases, randomMeta()); @@ -376,10 +369,10 @@ public void testToStepsWithTwoPhases() { lifecycleName = randomAlphaOfLengthBetween(1, 20); Map phases = new LinkedHashMap<>(); - LifecycleAction firstAction = new MockAction(Arrays.asList(firstActionStep, firstActionAnotherStep)); - LifecycleAction secondAction = new MockAction(Arrays.asList(secondActionStep)); - Map firstActions = Collections.singletonMap(MockAction.NAME, firstAction); - Map secondActions = Collections.singletonMap(MockAction.NAME, secondAction); + LifecycleAction firstAction = new MockAction(List.of(firstActionStep, firstActionAnotherStep)); + LifecycleAction secondAction = new MockAction(List.of(secondActionStep)); + Map firstActions = Map.of(MockAction.NAME, firstAction); + Map secondActions = Map.of(MockAction.NAME, secondAction); Phase firstPhase = new Phase("first_phase", TimeValue.ZERO, firstActions); Phase secondPhase = new Phase("second_phase", TimeValue.ZERO, secondActions); phases.put(firstPhase.getName(), firstPhase); @@ -405,10 +398,10 @@ public void testToStepsWithTwoPhases() { public void testIsActionSafe() { Map phases = new LinkedHashMap<>(); - LifecycleAction firstAction = new MockAction(Collections.emptyList(), true); - LifecycleAction secondAction = new MockAction(Collections.emptyList(), false); - Map firstActions = Collections.singletonMap(MockAction.NAME, firstAction); - Map secondActions = Collections.singletonMap(MockAction.NAME, secondAction); + LifecycleAction firstAction = new MockAction(List.of(), true); + LifecycleAction secondAction = new MockAction(List.of(), false); + Map firstActions = Map.of(MockAction.NAME, firstAction); + Map secondActions = Map.of(MockAction.NAME, secondAction); Phase firstPhase = new Phase("first_phase", TimeValue.ZERO, firstActions); Phase secondPhase = new Phase("second_phase", TimeValue.ZERO, secondActions); phases.put(firstPhase.getName(), firstPhase); @@ -462,12 +455,9 @@ public void testValidatePolicyName() { public static Map randomMeta() { if (randomBoolean()) { if (randomBoolean()) { - return Collections.singletonMap(randomAlphaOfLength(4), randomAlphaOfLength(4)); + return Map.of(randomAlphaOfLength(4), randomAlphaOfLength(4)); } else { - return Collections.singletonMap( - randomAlphaOfLength(5), - Collections.singletonMap(randomAlphaOfLength(4), randomAlphaOfLength(4)) - ); + return Map.of(randomAlphaOfLength(5), Map.of(randomAlphaOfLength(4), randomAlphaOfLength(4))); } } else { return null; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java index 3efe2dc04ea19..978486c6c0d39 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java +++ 
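The sweep from Collections.singletonMap/Collections.emptyMap/Arrays.asList to Map.of/List.of, and from collect(Collectors.toList()) to Stream.toList(), is behavior-preserving only because these fixtures are read-only: the JDK factory methods reject null keys, values, and elements, and Stream.toList() returns an unmodifiable list where Collectors.toList() happens to return a mutable ArrayList. That is also why the hot-phase reshuffling in randomTimeseriesLifecyclePolicy above had to go: phaseNames now comes from .toList() and can no longer be mutated in place. A self-contained illustration, using nothing beyond the JDK:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class CollectionFactoryNotes {
    public static void main(String[] args) {
        Map<String, String> meta = Map.of("k", "v");   // immutable and null-hostile
        List<String> phases = List.of("hot", "warm");  // likewise
        System.out.println(meta + " " + phases);

        try {
            Map.of("k", null); // Collections.singletonMap("k", null) would have been fine
        } catch (NullPointerException expected) {
            System.out.println("Map.of rejects nulls");
        }

        List<String> mutable = Stream.of("a").collect(Collectors.toList());
        mutable.add("b"); // works: Collectors.toList() currently returns an ArrayList

        List<String> frozen = Stream.of("a").toList();
        try {
            frozen.add("b");
        } catch (UnsupportedOperationException expected) {
            System.out.println("Stream.toList() is unmodifiable");
        }
    }
}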
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java @@ -23,8 +23,8 @@ import org.elasticsearch.indices.EmptySystemIndices; import org.elasticsearch.test.ESTestCase; -import java.util.Arrays; -import java.util.Collections; +import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -40,7 +40,7 @@ public void testCalculateUsage() { ClusterState state = ClusterState.builder(new ClusterName("mycluster")).build(); assertThat( LifecyclePolicyUtils.calculateUsage(iner, state, "mypolicy"), - equalTo(new ItemUsage(Collections.emptyList(), Collections.emptyList(), Collections.emptyList())) + equalTo(new ItemUsage(List.of(), List.of(), List.of())) ); } @@ -52,7 +52,7 @@ public void testCalculateUsage() { .putCustom( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( - Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), + Map.of("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING ) ) @@ -61,7 +61,7 @@ public void testCalculateUsage() { .build(); assertThat( LifecyclePolicyUtils.calculateUsage(iner, state, "mypolicy"), - equalTo(new ItemUsage(Collections.emptyList(), Collections.emptyList(), Collections.emptyList())) + equalTo(new ItemUsage(List.of(), List.of(), List.of())) ); } @@ -73,7 +73,7 @@ public void testCalculateUsage() { .putCustom( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( - Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), + Map.of("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING ) ) @@ -86,7 +86,7 @@ public void testCalculateUsage() { .build(); assertThat( LifecyclePolicyUtils.calculateUsage(iner, state, "mypolicy"), - equalTo(new ItemUsage(Collections.singleton("myindex"), Collections.emptyList(), Collections.emptyList())) + equalTo(new ItemUsage(List.of("myindex"), List.of(), List.of())) ); } @@ -98,7 +98,7 @@ public void testCalculateUsage() { .putCustom( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( - Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), + Map.of("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING ) ) @@ -109,10 +109,10 @@ public void testCalculateUsage() { .putCustom( ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata( - Collections.singletonMap( + Map.of( "mytemplate", ComposableIndexTemplate.builder() - .indexPatterns(Collections.singletonList("myds")) + .indexPatterns(List.of("myds")) .template( new Template( Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy").build(), @@ -130,7 +130,7 @@ public void testCalculateUsage() { .build(); assertThat( LifecyclePolicyUtils.calculateUsage(iner, state, "mypolicy"), - equalTo(new ItemUsage(Collections.singleton("myindex"), Collections.emptyList(), Collections.singleton("mytemplate"))) + equalTo(new ItemUsage(List.of("myindex"), List.of(), List.of("mytemplate"))) ); } @@ -139,7 +139,7 @@ public void testCalculateUsage() { .putCustom( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( - Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), + Map.of("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING ) ) @@ -159,10 +159,10 @@ public void 
testCalculateUsage() { .putCustom( ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata( - Collections.singletonMap( + Map.of( "mytemplate", ComposableIndexTemplate.builder() - .indexPatterns(Collections.singletonList("myds")) + .indexPatterns(List.of("myds")) .template( new Template(Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy").build(), null, null) ) @@ -172,15 +172,13 @@ public void testCalculateUsage() { ) ); // Need to get the real Index instance of myindex: - mBuilder.put(DataStreamTestHelper.newInstance("myds", Collections.singletonList(mBuilder.get("myindex").getIndex()))); + mBuilder.put(DataStreamTestHelper.newInstance("myds", List.of(mBuilder.get("myindex").getIndex()))); // Test where policy exists and is used by an index, datastream, and template ClusterState state = ClusterState.builder(new ClusterName("mycluster")).metadata(mBuilder.build()).build(); assertThat( LifecyclePolicyUtils.calculateUsage(iner, state, "mypolicy"), - equalTo( - new ItemUsage(Arrays.asList("myindex", "another"), Collections.singleton("myds"), Collections.singleton("mytemplate")) - ) + equalTo(new ItemUsage(List.of("myindex", "another"), List.of("myds"), List.of("mytemplate"))) ); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java index e849512aa8f73..79f8a051abe25 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java @@ -15,14 +15,12 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; public class MockAction implements LifecycleAction { public static final String NAME = "TEST_ACTION"; - private List steps; + private final List steps; private static final ObjectParser PARSER = new ObjectParser<>(NAME, MockAction::new); private final boolean safe; @@ -32,7 +30,7 @@ public static MockAction parse(XContentParser parser) { } public MockAction() { - this(Collections.emptyList()); + this(List.of()); } public MockAction(List steps) { @@ -77,7 +75,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) @Override public void writeTo(StreamOutput out) throws IOException { - out.writeCollection(steps.stream().map(MockStep::new).collect(Collectors.toList())); + out.writeCollection(steps.stream().map(MockStep::new).toList()); out.writeBoolean(safe); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStepTests.java index 8ca7a00ab0948..7ccdb1a27326a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStepTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.LifecycleExecutionState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.test.client.NoOpClient; @@ -42,7 +43,7 @@ public MountSnapshotStep createRandomInstance() { String 
restoredIndexPrefix = randomAlphaOfLength(10); MountSearchableSnapshotRequest.Storage storage = randomStorageType(); Integer totalShardsPerNode = randomTotalShardsPerNode(true); - return new MountSnapshotStep(stepKey, nextStepKey, client, restoredIndexPrefix, storage, totalShardsPerNode); + return new MountSnapshotStep(stepKey, nextStepKey, client, restoredIndexPrefix, storage, totalShardsPerNode, 0); } public static MountSearchableSnapshotRequest.Storage randomStorageType() { @@ -61,7 +62,8 @@ protected MountSnapshotStep copyInstance(MountSnapshotStep instance) { instance.getClient(), instance.getRestoredIndexPrefix(), instance.getStorage(), - instance.getTotalShardsPerNode() + instance.getTotalShardsPerNode(), + instance.getReplicas() ); } @@ -72,7 +74,8 @@ public MountSnapshotStep mutateInstance(MountSnapshotStep instance) { String restoredIndexPrefix = instance.getRestoredIndexPrefix(); MountSearchableSnapshotRequest.Storage storage = instance.getStorage(); Integer totalShardsPerNode = instance.getTotalShardsPerNode(); - switch (between(0, 4)) { + int replicas = instance.getReplicas(); + switch (between(0, 5)) { case 0: key = new StepKey(key.phase(), key.action(), key.name() + randomAlphaOfLength(5)); break; @@ -94,10 +97,13 @@ public MountSnapshotStep mutateInstance(MountSnapshotStep instance) { case 4: totalShardsPerNode = totalShardsPerNode == null ? 1 : totalShardsPerNode + randomIntBetween(1, 100); break; + case 5: + replicas = replicas == 0 ? 1 : 0; // swap between 0 and 1 + break; default: throw new AssertionError("Illegal randomisation branch"); } - return new MountSnapshotStep(key, nextKey, instance.getClient(), restoredIndexPrefix, storage, totalShardsPerNode); + return new MountSnapshotStep(key, nextKey, instance.getClient(), restoredIndexPrefix, storage, totalShardsPerNode, replicas); } public void testCreateWithInvalidTotalShardsPerNode() throws Exception { @@ -111,7 +117,8 @@ public void testCreateWithInvalidTotalShardsPerNode() throws Exception { client, RESTORED_INDEX_PREFIX, randomStorageType(), - invalidTotalShardsPerNode + invalidTotalShardsPerNode, + 0 ) ); assertEquals("[total_shards_per_node] must be >= 1", exception.getMessage()); @@ -195,14 +202,18 @@ public void testPerformAction() throws Exception { indexName, RESTORED_INDEX_PREFIX, indexName, - new String[] { LifecycleSettings.LIFECYCLE_NAME } + new String[] { LifecycleSettings.LIFECYCLE_NAME }, + null, + 0 ); MountSnapshotStep step = new MountSnapshotStep( randomStepKey(), randomStepKey(), client, RESTORED_INDEX_PREFIX, - randomStorageType() + randomStorageType(), + null, + 0 ); performActionAndWait(step, indexMetadata, clusterState, null); } @@ -237,7 +248,9 @@ public void testResponseStatusHandling() throws Exception { randomStepKey(), clientPropagatingOKResponse, RESTORED_INDEX_PREFIX, - randomStorageType() + randomStorageType(), + null, + 0 ); performActionAndWait(step, indexMetadata, clusterState, null); } @@ -252,7 +265,9 @@ public void testResponseStatusHandling() throws Exception { randomStepKey(), clientPropagatingACCEPTEDResponse, RESTORED_INDEX_PREFIX, - randomStorageType() + randomStorageType(), + null, + 0 ); performActionAndWait(step, indexMetadata, clusterState, null); } @@ -289,47 +304,49 @@ public void testMountWithPartialAndRestoredPrefix() throws Exception { ); } - public void doTestMountWithoutSnapshotIndexNameInState(String prefix) throws Exception { - { - String indexNameSnippet = randomAlphaOfLength(10); - String indexName = prefix + indexNameSnippet; - String policyName = 
"test-ilm-policy"; - Map ilmCustom = new HashMap<>(); - String snapshotName = indexName + "-" + policyName; - ilmCustom.put("snapshot_name", snapshotName); - String repository = "repository"; - ilmCustom.put("snapshot_repository", repository); + private void doTestMountWithoutSnapshotIndexNameInState(String prefix) throws Exception { + String indexNameSnippet = randomAlphaOfLength(10); + String indexName = prefix + indexNameSnippet; + String policyName = "test-ilm-policy"; + Map ilmCustom = new HashMap<>(); + String snapshotName = indexName + "-" + policyName; + ilmCustom.put("snapshot_name", snapshotName); + String repository = "repository"; + ilmCustom.put("snapshot_repository", repository); - IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) - .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) - .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom) - .numberOfShards(randomIntBetween(1, 5)) - .numberOfReplicas(randomIntBetween(0, 5)); - IndexMetadata indexMetadata = indexMetadataBuilder.build(); + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) + .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom) + .numberOfShards(randomIntBetween(1, 5)) + .numberOfReplicas(randomIntBetween(0, 5)); + IndexMetadata indexMetadata = indexMetadataBuilder.build(); - ClusterState clusterState = ClusterState.builder(emptyClusterState()) - .metadata(Metadata.builder().put(indexMetadata, true).build()) - .build(); + ClusterState clusterState = ClusterState.builder(emptyClusterState()) + .metadata(Metadata.builder().put(indexMetadata, true).build()) + .build(); - try (var threadPool = createThreadPool()) { - final var client = getRestoreSnapshotRequestAssertingClient( - threadPool, - repository, - snapshotName, - indexName, - RESTORED_INDEX_PREFIX, - indexNameSnippet, - new String[] { LifecycleSettings.LIFECYCLE_NAME } - ); - MountSnapshotStep step = new MountSnapshotStep( - randomStepKey(), - randomStepKey(), - client, - RESTORED_INDEX_PREFIX, - randomStorageType() - ); - performActionAndWait(step, indexMetadata, clusterState, null); - } + try (var threadPool = createThreadPool()) { + final var client = getRestoreSnapshotRequestAssertingClient( + threadPool, + repository, + snapshotName, + indexName, + RESTORED_INDEX_PREFIX, + indexNameSnippet, + new String[] { LifecycleSettings.LIFECYCLE_NAME }, + null, + 0 + ); + MountSnapshotStep step = new MountSnapshotStep( + randomStepKey(), + randomStepKey(), + client, + RESTORED_INDEX_PREFIX, + randomStorageType(), + null, + 0 + ); + performActionAndWait(step, indexMetadata, clusterState, null); } } @@ -361,7 +378,11 @@ public void testIgnoreTotalShardsPerNodeInFrozenPhase() throws Exception { indexName, RESTORED_INDEX_PREFIX, indexName, - new String[] { LifecycleSettings.LIFECYCLE_NAME, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey() } + new String[] { + LifecycleSettings.LIFECYCLE_NAME, + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey() }, + null, + 0 ); MountSnapshotStep step = new MountSnapshotStep( new StepKey(TimeseriesLifecycleType.FROZEN_PHASE, randomAlphaOfLength(10), randomAlphaOfLength(10)), @@ -369,13 +390,14 @@ public void testIgnoreTotalShardsPerNodeInFrozenPhase() throws Exception { client, RESTORED_INDEX_PREFIX, randomStorageType(), - null + null, + 0 ); 
performActionAndWait(step, indexMetadata, clusterState, null); } } - public void testDoNotIgnoreTotalShardsPerNodeIfSet() throws Exception { + public void testDoNotIgnoreTotalShardsPerNodeAndReplicasIfSet() throws Exception { String indexName = randomAlphaOfLength(10); String policyName = "test-ilm-policy"; Map ilmCustom = new HashMap<>(); @@ -395,6 +417,9 @@ public void testDoNotIgnoreTotalShardsPerNodeIfSet() throws Exception { .metadata(Metadata.builder().put(indexMetadata, true).build()) .build(); + final Integer totalShardsPerNode = randomTotalShardsPerNode(false); + final int replicas = randomIntBetween(1, 5); + try (var threadPool = createThreadPool()) { final var client = getRestoreSnapshotRequestAssertingClient( threadPool, @@ -403,7 +428,9 @@ public void testDoNotIgnoreTotalShardsPerNodeIfSet() throws Exception { indexName, RESTORED_INDEX_PREFIX, indexName, - new String[] { LifecycleSettings.LIFECYCLE_NAME } + new String[] { LifecycleSettings.LIFECYCLE_NAME }, + totalShardsPerNode, + replicas ); MountSnapshotStep step = new MountSnapshotStep( new StepKey(TimeseriesLifecycleType.FROZEN_PHASE, randomAlphaOfLength(10), randomAlphaOfLength(10)), @@ -411,7 +438,8 @@ public void testDoNotIgnoreTotalShardsPerNodeIfSet() throws Exception { client, RESTORED_INDEX_PREFIX, randomStorageType(), - randomTotalShardsPerNode(false) + totalShardsPerNode, + replicas ); performActionAndWait(step, indexMetadata, clusterState, null); } @@ -439,7 +467,9 @@ private NoOpClient getRestoreSnapshotRequestAssertingClient( String indexName, String restoredIndexPrefix, String expectedSnapshotIndexName, - String[] expectedIgnoredIndexSettings + String[] expectedIgnoredIndexSettings, + @Nullable Integer totalShardsPerNode, + int replicas ) { return new NoOpClient(threadPool) { @Override @@ -462,6 +492,31 @@ protected void assertThat(mountSearchableSnapshotRequest.mountedIndexName(), is(restoredIndexPrefix + indexName)); assertThat(mountSearchableSnapshotRequest.snapshotIndexName(), is(expectedSnapshotIndexName)); + if (totalShardsPerNode != null) { + Integer totalShardsPerNodeSettingValue = ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get( + mountSearchableSnapshotRequest.indexSettings() + ); + assertThat(totalShardsPerNodeSettingValue, is(totalShardsPerNode)); + } else { + assertThat( + mountSearchableSnapshotRequest.indexSettings() + .hasValue(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey()), + is(false) + ); + } + + if (replicas > 0) { + Integer numberOfReplicasSettingValue = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get( + mountSearchableSnapshotRequest.indexSettings() + ); + assertThat(numberOfReplicasSettingValue, is(replicas)); + } else { + assertThat( + mountSearchableSnapshotRequest.indexSettings().hasValue(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey()), + is(false) + ); + } + // invoke the awaiting listener with a very generic 'response', just to fulfill the contract listener.onResponse((Response) new RestoreSnapshotResponse((RestoreInfo) null)); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java index 9871cb79b595b..475161676f2e8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java @@ -14,7 +14,6 @@ import 
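MountSnapshotStep now takes a replicas argument next to totalShardsPerNode, and the asserting client above only expects index.number_of_replicas on the mount request when replicas > 0. A plausible reading of the production side, sketched on the assumption that the step adds only settings that deviate from the defaults; the helper below is hypothetical, not code from this PR:

import org.elasticsearch.common.settings.Settings;

// Hypothetical sketch mirroring what the test assertions imply: values left
// unset are omitted entirely, so the mounted index keeps its defaults.
class MountSettingsSketch {
    static Settings mountIndexSettings(Integer totalShardsPerNode, int replicas) {
        Settings.Builder settings = Settings.builder();
        if (totalShardsPerNode != null) {
            settings.put("index.routing.allocation.total_shards_per_node", totalShardsPerNode);
        }
        if (replicas > 0) {
            settings.put("index.number_of_replicas", replicas);
        }
        return settings.build();
    }
}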
org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleStats; -import java.util.Collections; import java.util.Map; import java.util.Optional; @@ -97,9 +96,9 @@ private OperationMode executeILMUpdate( OperationMode requestMode, boolean assertSameClusterState ) { - IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), currentMode); + IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Map.of(), currentMode); SnapshotLifecycleMetadata snapshotLifecycleMetadata = new SnapshotLifecycleMetadata( - Collections.emptyMap(), + Map.of(), currentMode, new SnapshotLifecycleStats() ); @@ -131,9 +130,9 @@ private OperationMode executeSLMUpdate( OperationMode requestMode, boolean assertSameClusterState ) { - IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), currentMode); + IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Map.of(), currentMode); SnapshotLifecycleMetadata snapshotLifecycleMetadata = new SnapshotLifecycleMetadata( - Collections.emptyMap(), + Map.of(), currentMode, new SnapshotLifecycleStats() ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java index 51ebc98176955..da5d6eddfc72d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.core.ccr.action.ShardFollowTask; import org.mockito.Mockito; -import java.util.Collections; +import java.util.Map; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -38,7 +38,7 @@ protected PauseFollowerIndexStep newInstance(Step.StepKey key, Step.StepKey next public void testPauseFollowingIndex() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -60,7 +60,7 @@ public void testPauseFollowingIndex() throws Exception { public void testRequestNotAcknowledged() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -81,7 +81,7 @@ public void testRequestNotAcknowledged() { public void testPauseFollowingIndexFailed() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -107,7 +107,7 @@ public void testPauseFollowingIndexFailed() { public final void testNoShardFollowPersistentTasks() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("managed-index") 
.settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -138,7 +138,7 @@ public final void testNoShardFollowTasksForManagedIndex() throws Exception { IndexMetadata followerIndex = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current())) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -171,7 +171,7 @@ private static ClusterState setupClusterStateWithFollowingIndex(IndexMetadata fo new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), - Collections.emptyMap() + Map.of() ), null ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagementTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagementTests.java index 952741032fc90..7e78a81776a7a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagementTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagementTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.xcontent.ParseField; import java.io.IOException; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -84,9 +83,9 @@ public void testRefreshPhaseJson() throws IOException { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Map.of(), 2L, 2L); ClusterState existingState = ClusterState.builder(ClusterState.EMPTY_STATE) .metadata(Metadata.builder(Metadata.EMPTY_METADATA).put(meta, false).build()) @@ -315,7 +314,7 @@ public void testIndexCanBeSafelyUpdated() { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); assertTrue(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -351,7 +350,7 @@ public void testIndexCanBeSafelyUpdated() { Map actions = new HashMap<>(); actions.put("set_priority", new SetPriorityAction(150)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); assertFalse(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -390,7 +389,7 @@ public void testIndexCanBeSafelyUpdated() { new RolloverAction(null, null, TimeValue.timeValueSeconds(5), null, null, null, null, null, null, null) ); Phase hotPhase = new Phase("hot", 
TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); assertFalse(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -422,7 +421,7 @@ public void testIndexCanBeSafelyUpdated() { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); assertFalse(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -443,7 +442,7 @@ public void testIndexCanBeSafelyUpdated() { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); assertFalse(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -482,16 +481,16 @@ public void testUpdateIndicesForPolicy() throws IOException { oldActions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); oldActions.put("set_priority", new SetPriorityAction(100)); Phase oldHotPhase = new Phase("hot", TimeValue.ZERO, oldActions); - Map oldPhases = Collections.singletonMap("hot", oldHotPhase); + Map oldPhases = Map.of("hot", oldHotPhase); LifecyclePolicy oldPolicy = new LifecyclePolicy("my-policy", oldPhases); Map actions = new HashMap<>(); actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Map.of(), 2L, 2L); assertTrue(isIndexPhaseDefinitionUpdatable(REGISTRY, client, meta, newPolicy, null)); @@ -509,9 +508,9 @@ public void testUpdateIndicesForPolicy() throws IOException { actions.put("rollover", new RolloverAction(null, null, null, 2L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(150)); hotPhase = new Phase("hot", TimeValue.ZERO, actions); - phases = Collections.singletonMap("hot", hotPhase); + phases = Map.of("hot", hotPhase); newPolicy = new LifecyclePolicy("my-policy", phases); - policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + policyMetadata = new LifecyclePolicyMetadata(newPolicy, Map.of(), 2L, 2L); logger.info("--> update with changed policy, but not configured in settings"); updatedState = updateIndicesForPolicy(existingState, REGISTRY, client, oldPolicy, policyMetadata, null); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseExecutionInfoTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseExecutionInfoTests.java index 7622118d2b99f..ce477a07c2f0b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseExecutionInfoTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseExecutionInfoTests.java @@ -18,7 +18,7 @@ import org.junit.Before; import java.io.IOException; -import java.util.Arrays; +import java.util.List; public class PhaseExecutionInfoTests extends AbstractXContentSerializingTestCase { @@ -71,7 +71,7 @@ protected PhaseExecutionInfo mutateInstance(PhaseExecutionInfo instance) { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) + List.of(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseTests.java index bf925c4282fc1..5a194b48f7701 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PhaseTests.java @@ -18,9 +18,8 @@ import org.junit.Before; import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -42,9 +41,9 @@ static Phase randomTestPhase(String phaseName) { if (randomBoolean()) { after = randomTimeValue(0, 1_000_000_000, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS); } - Map actions = Collections.emptyMap(); + Map actions = Map.of(); if (randomBoolean()) { - actions = Collections.singletonMap(MockAction.NAME, new MockAction()); + actions = Map.of(MockAction.NAME, new MockAction()); } return new Phase(phaseName, after, actions); } @@ -61,7 +60,7 @@ protected Reader instanceReader() { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) + List.of(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)) ); } @@ -85,7 +84,7 @@ protected Phase mutateInstance(Phase instance) { case 1 -> after = TimeValue.timeValueSeconds(after.getSeconds() + randomIntBetween(1, 1000)); case 2 -> { actions = new HashMap<>(actions); - actions.put(MockAction.NAME + "another", new MockAction(Collections.emptyList())); + actions.put(MockAction.NAME + "another", new MockAction(List.of())); } default -> throw new AssertionError("Illegal randomisation branch"); } @@ -93,7 +92,7 @@ protected Phase mutateInstance(Phase instance) { } public void testDefaultAfter() { - Phase phase = new Phase(randomAlphaOfLength(20), null, Collections.emptyMap()); + Phase phase = new Phase(randomAlphaOfLength(20), null, Map.of()); assertEquals(TimeValue.ZERO, phase.getMinimumAge()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java index 4af25d094f5fe..3683690763d93 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java @@ -23,9 +23,9 @@ import org.hamcrest.Matchers; import org.mockito.Mockito; -import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; import static org.mockito.Mockito.verifyNoMoreInteractions; @@ -185,7 +185,7 @@ private void mockClientRolloverCall(String rolloverTarget) { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; assertRolloverIndexRequest(request, rolloverTarget); - listener.onResponse(new RolloverResponse(null, null, Collections.emptyMap(), request.isDryRun(), true, true, true, false)); + listener.onResponse(new RolloverResponse(null, null, Map.of(), request.isDryRun(), true, true, true, false)); return null; }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); } @@ -214,11 +214,7 @@ public void testPerformActionSkipsRolloverForAlreadyRolledIndex() throws Excepti .putAlias(AliasMetadata.builder(rolloverAlias)) .settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias)) .putRolloverInfo( - new RolloverInfo( - rolloverAlias, - Collections.singletonList(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), - System.currentTimeMillis() - ) + new RolloverInfo(rolloverAlias, List.of(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), System.currentTimeMillis()) ) .numberOfShards(randomIntBetween(1, 5)) .numberOfReplicas(randomIntBetween(0, 5)) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotActionTests.java index ca219fdde3d57..5304b7885f96c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotActionTests.java @@ -14,6 +14,8 @@ import java.io.IOException; import java.util.List; +import java.util.Objects; +import java.util.stream.Stream; import static org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction.NAME; import static org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction.TOTAL_SHARDS_PER_NODE; @@ -29,40 +31,23 @@ public void testToSteps() { StepKey nextStepKey = new StepKey(phase, randomAlphaOfLengthBetween(1, 5), randomAlphaOfLengthBetween(1, 5)); List steps = action.toSteps(null, phase, nextStepKey, null); - assertThat(steps.size(), is(action.isForceMergeIndex() ? 19 : 17)); - - List expectedSteps = action.isForceMergeIndex() - ? 
expectedStepKeysWithForceMerge(phase) - : expectedStepKeysNoForceMerge(phase); - - assertThat(steps.get(0).getKey(), is(expectedSteps.get(0))); - assertThat(steps.get(1).getKey(), is(expectedSteps.get(1))); - assertThat(steps.get(2).getKey(), is(expectedSteps.get(2))); - assertThat(steps.get(3).getKey(), is(expectedSteps.get(3))); - assertThat(steps.get(4).getKey(), is(expectedSteps.get(4))); - assertThat(steps.get(5).getKey(), is(expectedSteps.get(5))); - assertThat(steps.get(6).getKey(), is(expectedSteps.get(6))); - assertThat(steps.get(7).getKey(), is(expectedSteps.get(7))); - assertThat(steps.get(8).getKey(), is(expectedSteps.get(8))); - assertThat(steps.get(9).getKey(), is(expectedSteps.get(9))); - assertThat(steps.get(10).getKey(), is(expectedSteps.get(10))); - assertThat(steps.get(11).getKey(), is(expectedSteps.get(11))); - assertThat(steps.get(12).getKey(), is(expectedSteps.get(12))); - assertThat(steps.get(13).getKey(), is(expectedSteps.get(13))); - assertThat(steps.get(14).getKey(), is(expectedSteps.get(14))); - assertThat(steps.get(15).getKey(), is(expectedSteps.get(15))); - - if (action.isForceMergeIndex()) { - assertThat(steps.get(16).getKey(), is(expectedSteps.get(16))); - assertThat(steps.get(17).getKey(), is(expectedSteps.get(17))); - CreateSnapshotStep createSnapshotStep = (CreateSnapshotStep) steps.get(9); - assertThat(createSnapshotStep.getNextKeyOnIncomplete(), is(expectedSteps.get(8))); - validateWaitForDataTierStep(phase, steps, 10, 11); - } else { - CreateSnapshotStep createSnapshotStep = (CreateSnapshotStep) steps.get(7); - assertThat(createSnapshotStep.getNextKeyOnIncomplete(), is(expectedSteps.get(6))); - validateWaitForDataTierStep(phase, steps, 8, 9); + + List expectedSteps = expectedStepKeys(phase, action.isForceMergeIndex()); + assertThat(steps.size(), is(expectedSteps.size())); + for (int i = 0; i < expectedSteps.size(); i++) { + assertThat("steps match expectation at index " + i, steps.get(i).getKey(), is(expectedSteps.get(i))); + } + + int index = -1; + for (int i = 0; i < expectedSteps.size(); i++) { + if (expectedSteps.get(i).name().equals(CreateSnapshotStep.NAME)) { + index = i; + break; + } } + CreateSnapshotStep createSnapshotStep = (CreateSnapshotStep) steps.get(index); + assertThat(createSnapshotStep.getNextKeyOnIncomplete(), is(expectedSteps.get(index - 1))); + validateWaitForDataTierStep(phase, steps, index + 1, index + 2); } private void validateWaitForDataTierStep(String phase, List steps, int waitForDataTierStepIndex, int mountStepIndex) { @@ -108,15 +93,15 @@ public void testCreateWithInvalidTotalShardsPerNode() { assertEquals("[" + TOTAL_SHARDS_PER_NODE.getPreferredName() + "] must be >= 1", exception.getMessage()); } - private List expectedStepKeysWithForceMerge(String phase) { - return List.of( + private List expectedStepKeys(String phase, boolean forceMergeIndex) { + return Stream.of( new StepKey(phase, NAME, SearchableSnapshotAction.CONDITIONAL_SKIP_ACTION_STEP), new StepKey(phase, NAME, CheckNotDataStreamWriteIndexStep.NAME), new StepKey(phase, NAME, WaitForNoFollowersStep.NAME), new StepKey(phase, NAME, WaitUntilTimeSeriesEndTimePassesStep.NAME), new StepKey(phase, NAME, SearchableSnapshotAction.CONDITIONAL_SKIP_GENERATE_AND_CLEAN), - new StepKey(phase, NAME, ForceMergeStep.NAME), - new StepKey(phase, NAME, SegmentCountStep.NAME), + forceMergeIndex ? new StepKey(phase, NAME, ForceMergeStep.NAME) : null, + forceMergeIndex ? 
new StepKey(phase, NAME, SegmentCountStep.NAME) : null, new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME), new StepKey(phase, NAME, CleanupSnapshotStep.NAME), new StepKey(phase, NAME, CreateSnapshotStep.NAME), @@ -129,29 +114,7 @@ private List expectedStepKeysWithForceMerge(String phase) { new StepKey(phase, NAME, ReplaceDataStreamBackingIndexStep.NAME), new StepKey(phase, NAME, DeleteStep.NAME), new StepKey(phase, NAME, SwapAliasesAndDeleteSourceIndexStep.NAME) - ); - } - - private List expectedStepKeysNoForceMerge(String phase) { - return List.of( - new StepKey(phase, NAME, SearchableSnapshotAction.CONDITIONAL_SKIP_ACTION_STEP), - new StepKey(phase, NAME, CheckNotDataStreamWriteIndexStep.NAME), - new StepKey(phase, NAME, WaitForNoFollowersStep.NAME), - new StepKey(phase, NAME, WaitUntilTimeSeriesEndTimePassesStep.NAME), - new StepKey(phase, NAME, SearchableSnapshotAction.CONDITIONAL_SKIP_GENERATE_AND_CLEAN), - new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME), - new StepKey(phase, NAME, CleanupSnapshotStep.NAME), - new StepKey(phase, NAME, CreateSnapshotStep.NAME), - new StepKey(phase, NAME, WaitForDataTierStep.NAME), - new StepKey(phase, NAME, MountSnapshotStep.NAME), - new StepKey(phase, NAME, WaitForIndexColorStep.NAME), - new StepKey(phase, NAME, CopyExecutionStateStep.NAME), - new StepKey(phase, NAME, CopySettingsStep.NAME), - new StepKey(phase, NAME, SearchableSnapshotAction.CONDITIONAL_DATASTREAM_CHECK_KEY), - new StepKey(phase, NAME, ReplaceDataStreamBackingIndexStep.NAME), - new StepKey(phase, NAME, DeleteStep.NAME), - new StepKey(phase, NAME, SwapAliasesAndDeleteSourceIndexStep.NAME) - ); + ).filter(Objects::nonNull).toList(); } @Override @@ -172,8 +135,16 @@ protected Writeable.Reader instanceReader() { @Override protected SearchableSnapshotAction mutateInstance(SearchableSnapshotAction instance) { return switch (randomIntBetween(0, 2)) { - case 0 -> new SearchableSnapshotAction(randomAlphaOfLengthBetween(5, 10), instance.isForceMergeIndex()); - case 1 -> new SearchableSnapshotAction(instance.getSnapshotRepository(), instance.isForceMergeIndex() == false); + case 0 -> new SearchableSnapshotAction( + randomAlphaOfLengthBetween(5, 10), + instance.isForceMergeIndex(), + instance.getTotalShardsPerNode() + ); + case 1 -> new SearchableSnapshotAction( + instance.getSnapshotRepository(), + instance.isForceMergeIndex() == false, + instance.getTotalShardsPerNode() + ); case 2 -> new SearchableSnapshotAction( instance.getSnapshotRepository(), instance.isForceMergeIndex(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepInfoTests.java index 7aeeba557ee54..9e0c7c7c6b167 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepInfoTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepInfoTests.java @@ -38,7 +38,7 @@ public final void testEqualsAndHashcode() { } protected final Info copyInstance(Info instance) throws IOException { - return new Info(instance.getNumberShardsLeftToMerge()); + return new Info(instance.numberShardsLeftToMerge()); } protected Info mutateInstance(Info instance) throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java index 1d14bfb261fc2..9f04e202022c9 100644 --- 
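The two near-duplicate expectedStepKeysWithForceMerge/expectedStepKeysNoForceMerge helpers collapse above into a single expectedStepKeys(phase, forceMergeIndex) that models the optional force-merge steps as conditional null entries stripped by filter(Objects::nonNull); the CreateSnapshotStep position is then found by name rather than hard-coded, since the indices shift when steps drop out. The same pattern in isolation, with hypothetical step names standing in for the ILM ones:

import java.util.List;
import java.util.Objects;
import java.util.stream.Stream;

class ConditionalStepListSketch {
    static List<String> expectedSteps(boolean forceMerge) {
        return Stream.of(
            "check-not-write-index",
            forceMerge ? "force-merge" : null,   // removed by the filter when disabled
            forceMerge ? "segment-count" : null,
            "create-snapshot",
            "mount-snapshot"
        ).filter(Objects::nonNull).toList();
    }

    public static void main(String[] args) {
        System.out.println(expectedSteps(true));  // five steps
        System.out.println(expectedSteps(false)); // three steps; later indices shift left
    }
}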
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java @@ -77,14 +77,14 @@ public void testIsConditionMet() { ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); ShardSegments[] shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; IndexShardSegments indexShardSegments = new IndexShardSegments(ShardId.fromString("[idx][123]"), shardSegmentsArray); - Map indexShards = Collections.singletonMap(0, indexShardSegments); + Map indexShards = Map.of(0, indexShardSegments); Spliterator iss = indexShards.values().spliterator(); List segments = new ArrayList<>(); for (int i = 0; i < maxNumSegments - randomIntBetween(0, 3); i++) { segments.add(null); } Mockito.when(indicesSegmentResponse.getStatus()).thenReturn(RestStatus.OK); - Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Collections.singletonMap(index.getName(), indexSegments)); + Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Map.of(index.getName(), indexSegments)); Mockito.when(indexSegments.spliterator()).thenReturn(iss); Mockito.when(shardSegmentsOne.getSegments()).thenReturn(segments); @@ -129,14 +129,14 @@ public void testIsConditionIsTrueEvenWhenMoreSegments() { ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); ShardSegments[] shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; IndexShardSegments indexShardSegments = new IndexShardSegments(ShardId.fromString("[idx][123]"), shardSegmentsArray); - Map indexShards = Collections.singletonMap(0, indexShardSegments); + Map indexShards = Map.of(0, indexShardSegments); Spliterator iss = indexShards.values().spliterator(); List segments = new ArrayList<>(); for (int i = 0; i < maxNumSegments + randomIntBetween(1, 3); i++) { segments.add(null); } Mockito.when(indicesSegmentResponse.getStatus()).thenReturn(RestStatus.OK); - Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Collections.singletonMap(index.getName(), indexSegments)); + Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Map.of(index.getName(), indexSegments)); Mockito.when(indexSegments.spliterator()).thenReturn(iss); Mockito.when(shardSegmentsOne.getSegments()).thenReturn(segments); @@ -181,7 +181,7 @@ public void testFailedToRetrieveSomeSegments() { ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); ShardSegments[] shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; IndexShardSegments indexShardSegments = new IndexShardSegments(ShardId.fromString("[idx][123]"), shardSegmentsArray); - Map indexShards = Collections.singletonMap(0, indexShardSegments); + Map indexShards = Map.of(0, indexShardSegments); Spliterator iss = indexShards.values().spliterator(); List segments = new ArrayList<>(); for (int i = 0; i < maxNumSegments + randomIntBetween(1, 3); i++) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java index a33d6e3332a40..60fa69708e111 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java @@ -28,8 +28,8 @@ import org.mockito.Mockito; import java.io.IOException; -import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -203,11 +203,11 @@ public void assertPerformAction( LifecyclePolicy policy = new LifecyclePolicy( lifecycleName, - Collections.singletonMap("warm", new Phase("warm", TimeValue.ZERO, Collections.singletonMap(action.getWriteableName(), action))) + Map.of("warm", new Phase("warm", TimeValue.ZERO, Map.of(action.getWriteableName(), action))) ); LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata( policy, - Collections.emptyMap(), + Map.of(), randomNonNegativeLong(), randomNonNegativeLong() ); @@ -216,10 +216,7 @@ public void assertPerformAction( Metadata.builder() .putCustom( IndexLifecycleMetadata.TYPE, - new IndexLifecycleMetadata( - Collections.singletonMap(policyMetadata.getName(), policyMetadata), - OperationMode.RUNNING - ) + new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.RUNNING) ) .put( indexMetadataBuilder.putCustom( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStepTests.java index 7a03343b461de..c8efce288260f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStepTests.java @@ -17,7 +17,6 @@ import org.mockito.Mockito; import org.mockito.stubbing.Answer; -import java.util.Arrays; import java.util.List; import static org.elasticsearch.xpack.core.ilm.ShrinkIndexNameSupplier.SHRUNKEN_INDEX_PREFIX; @@ -73,7 +72,7 @@ public void testPerformAction() throws Exception { String sourceIndex = indexMetadata.getIndex().getName(); String shrunkenIndex = SHRUNKEN_INDEX_PREFIX + sourceIndex; - List expectedAliasActions = Arrays.asList( + List expectedAliasActions = List.of( IndicesAliasesRequest.AliasActions.removeIndex().index(sourceIndex), IndicesAliasesRequest.AliasActions.add().index(shrunkenIndex).alias(sourceIndex), IndicesAliasesRequest.AliasActions.add() diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java index 257df32b4d950..b138339c25197 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkStepTests.java @@ -21,8 +21,8 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import org.mockito.Mockito; -import java.util.Collections; import java.util.Map; +import java.util.Set; import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; import static org.elasticsearch.common.IndexNameGenerator.generateValidIndexName; @@ -101,7 +101,7 @@ public void testPerformAction() throws Exception { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; assertThat(request.getSourceIndex(), equalTo(sourceIndexMetadata.getIndex().getName())); - assertThat(request.getTargetIndexRequest().aliases(), equalTo(Collections.emptySet())); + assertThat(request.getTargetIndexRequest().aliases(), equalTo(Set.of())); Settings.Builder builder = Settings.builder(); builder.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, sourceIndexMetadata.getNumberOfReplicas()) diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkShardsAllocatedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkShardsAllocatedStepTests.java index 59eff971c1643..592d259f07069 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkShardsAllocatedStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkShardsAllocatedStepTests.java @@ -94,8 +94,8 @@ public void testConditionMet() { .build(); Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); - assertTrue(result.isComplete()); - assertNull(result.getInformationContext()); + assertTrue(result.complete()); + assertNull(result.informationContext()); } public void testConditionNotMetBecauseOfActive() { @@ -137,8 +137,8 @@ public void testConditionNotMetBecauseOfActive() { .build(); Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); - assertFalse(result.isComplete()); - assertEquals(new ShrunkShardsAllocatedStep.Info(true, shrinkNumberOfShards, false), result.getInformationContext()); + assertFalse(result.complete()); + assertEquals(new ShrunkShardsAllocatedStep.Info(true, shrinkNumberOfShards, false), result.informationContext()); } public void testConditionNotMetBecauseOfShrunkIndexDoesntExistYet() { @@ -166,7 +166,7 @@ public void testConditionNotMetBecauseOfShrunkIndexDoesntExistYet() { .build(); Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); - assertFalse(result.isComplete()); - assertEquals(new ShrunkShardsAllocatedStep.Info(false, -1, false), result.getInformationContext()); + assertFalse(result.complete()); + assertEquals(new ShrunkShardsAllocatedStep.Info(false, -1, false), result.informationContext()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkenIndexCheckStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkenIndexCheckStepTests.java index 523404a00a0c5..4eb49df7f89c5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkenIndexCheckStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkenIndexCheckStepTests.java @@ -59,8 +59,8 @@ public void testConditionMet() { ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); - assertTrue(result.isComplete()); - assertNull(result.getInformationContext()); + assertTrue(result.complete()); + assertNull(result.informationContext()); } public void testConditionNotMetBecauseNotSameShrunkenIndex() { @@ -77,8 +77,8 @@ public void testConditionNotMetBecauseNotSameShrunkenIndex() { .build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); Result result = step.isConditionMet(shrinkIndexMetadata.getIndex(), clusterState); - assertFalse(result.isComplete()); - assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInformationContext()); + assertFalse(result.complete()); + assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.informationContext()); } public void testConditionNotMetBecauseSourceIndexExists() { @@ -101,8 +101,8 @@ public void testConditionNotMetBecauseSourceIndexExists() { .build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); Result result = 
step.isConditionMet(shrinkIndexMetadata.getIndex(), clusterState); - assertFalse(result.isComplete()); - assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInformationContext()); + assertFalse(result.complete()); + assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.informationContext()); } public void testIllegalState() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java index f9f06b10ad2f9..1a99043b86ad7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.Step.StepKey; -import java.util.Arrays; import java.util.List; import java.util.function.BiFunction; @@ -92,7 +91,7 @@ public void testPerformAction() { String targetIndexPrefix = "index_prefix"; String targetIndexName = targetIndexPrefix + sourceIndexName; - List expectedAliasActions = Arrays.asList( + List expectedAliasActions = List.of( AliasActions.removeIndex().index(sourceIndexName), AliasActions.add().index(targetIndexName).alias(sourceIndexName), AliasActions.add() diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java index 55fa3792fa6c7..f7d1ff5294f58 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java @@ -13,9 +13,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -51,13 +49,7 @@ public class TimeseriesLifecycleTypeTests extends ESTestCase { - private static final AllocateAction TEST_ALLOCATE_ACTION = new AllocateAction( - 2, - 20, - Collections.singletonMap("node", "node1"), - null, - null - ); + private static final AllocateAction TEST_ALLOCATE_ACTION = new AllocateAction(2, 20, Map.of("node", "node1"), null, null); private static final DeleteAction TEST_DELETE_ACTION = DeleteAction.WITH_SNAPSHOT_DELETE; private static final WaitForSnapshotAction TEST_WAIT_FOR_SNAPSHOT_ACTION = new WaitForSnapshotAction("policy"); @@ -91,7 +83,7 @@ public void testValidatePhases() { if (invalid) { phaseName += randomAlphaOfLength(5); } - Map phases = Collections.singletonMap(phaseName, new Phase(phaseName, TimeValue.ZERO, Collections.emptyMap())); + Map phases = Map.of(phaseName, new Phase(phaseName, TimeValue.ZERO, Map.of())); if (invalid) { Exception e = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE.validate(phases.values())); assertThat(e.getMessage(), equalTo("Timeseries lifecycle does not support phase [" + phaseName + "]")); @@ -109,7 +101,7 @@ public void testValidateHotPhase() { invalidAction = getTestAction(randomFrom("allocate", "delete", "freeze")); actions.put(invalidAction.getWriteableName(), invalidAction); } - Map hotPhase = Collections.singletonMap("hot", new Phase("hot", 
TimeValue.ZERO, actions)); + Map hotPhase = Map.of("hot", new Phase("hot", TimeValue.ZERO, actions)); if (invalidAction != null) { Exception e = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE.validate(hotPhase.values())); @@ -123,14 +115,14 @@ public void testValidateHotPhase() { final Map hotActionMap = hotActions.stream() .map(this::getTestAction) .collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())); - TimeseriesLifecycleType.INSTANCE.validate(Collections.singleton(new Phase("hot", TimeValue.ZERO, hotActionMap))); + TimeseriesLifecycleType.INSTANCE.validate(List.of(new Phase("hot", TimeValue.ZERO, hotActionMap))); }; - validateHotActions.accept(Arrays.asList(RolloverAction.NAME)); - validateHotActions.accept(Arrays.asList(RolloverAction.NAME, ForceMergeAction.NAME)); + validateHotActions.accept(List.of(RolloverAction.NAME)); + validateHotActions.accept(List.of(RolloverAction.NAME, ForceMergeAction.NAME)); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> validateHotActions.accept(Arrays.asList(ForceMergeAction.NAME)) + () -> validateHotActions.accept(List.of(ForceMergeAction.NAME)) ); assertThat( e.getMessage(), @@ -148,7 +140,7 @@ public void testValidateWarmPhase() { invalidAction = getTestAction(randomFrom("rollover", "delete", "freeze")); actions.put(invalidAction.getWriteableName(), invalidAction); } - Map warmPhase = Collections.singletonMap("warm", new Phase("warm", TimeValue.ZERO, actions)); + Map warmPhase = Map.of("warm", new Phase("warm", TimeValue.ZERO, actions)); if (invalidAction != null) { Exception e = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE.validate(warmPhase.values())); @@ -167,7 +159,7 @@ public void testValidateColdPhase() { invalidAction = getTestAction(randomFrom("rollover", "delete", "forcemerge", "shrink")); actions.put(invalidAction.getWriteableName(), invalidAction); } - Map coldPhase = Collections.singletonMap("cold", new Phase("cold", TimeValue.ZERO, actions)); + Map coldPhase = Map.of("cold", new Phase("cold", TimeValue.ZERO, actions)); if (invalidAction != null) { Exception e = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE.validate(coldPhase.values())); @@ -188,7 +180,7 @@ public void testValidateFrozenPhase() { invalidAction = getTestAction(randomFrom("rollover", "delete", "forcemerge", "shrink")); actions.put(invalidAction.getWriteableName(), invalidAction); } - Map frozenPhase = Collections.singletonMap("frozen", new Phase("frozen", TimeValue.ZERO, actions)); + Map frozenPhase = Map.of("frozen", new Phase("frozen", TimeValue.ZERO, actions)); if (invalidAction != null) { Exception e = expectThrows( @@ -210,7 +202,7 @@ public void testValidateDeletePhase() { invalidAction = getTestAction(randomFrom("allocate", "rollover", "forcemerge", "shrink", "freeze", "set_priority")); actions.put(invalidAction.getWriteableName(), invalidAction); } - Map deletePhase = Collections.singletonMap("delete", new Phase("delete", TimeValue.ZERO, actions)); + Map deletePhase = Map.of("delete", new Phase("delete", TimeValue.ZERO, actions)); if (invalidAction != null) { Exception e = expectThrows( @@ -459,7 +451,7 @@ public void testValidateDownsamplingAction() { public void testGetOrderedPhases() { Map phaseMap = new HashMap<>(); for (String phaseName : randomSubsetOf(randomIntBetween(0, ORDERED_VALID_PHASES.size()), ORDERED_VALID_PHASES)) { - phaseMap.put(phaseName, new Phase(phaseName, 
TimeValue.ZERO, Collections.emptyMap())); + phaseMap.put(phaseName, new Phase(phaseName, TimeValue.ZERO, Map.of())); } assertTrue(isSorted(TimeseriesLifecycleType.INSTANCE.getOrderedPhases(phaseMap), Phase::getName, ORDERED_VALID_PHASES)); @@ -509,7 +501,7 @@ private boolean isUnfollowInjected(String phaseName, String actionName) { public void testGetOrderedActionsInvalidPhase() { IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> TimeseriesLifecycleType.INSTANCE.getOrderedActions(new Phase("invalid", TimeValue.ZERO, Collections.emptyMap())) + () -> TimeseriesLifecycleType.INSTANCE.getOrderedActions(new Phase("invalid", TimeValue.ZERO, Map.of())) ); assertThat(exception.getMessage(), equalTo("lifecycle type [timeseries] does not support phase [invalid]")); } @@ -583,25 +575,25 @@ public void testShouldMigrateDataToTiers() { { // not inject in hot phase - Phase phase = new Phase(HOT_PHASE, TimeValue.ZERO, Collections.emptyMap()); + Phase phase = new Phase(HOT_PHASE, TimeValue.ZERO, Map.of()); assertThat(TimeseriesLifecycleType.shouldInjectMigrateStepForPhase(phase), is(false)); } { // not inject in frozen phase - Phase phase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Collections.emptyMap()); + Phase phase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Map.of()); assertThat(TimeseriesLifecycleType.shouldInjectMigrateStepForPhase(phase), is(false)); } { // not inject in delete phase - Phase phase = new Phase(DELETE_PHASE, TimeValue.ZERO, Collections.emptyMap()); + Phase phase = new Phase(DELETE_PHASE, TimeValue.ZERO, Map.of()); assertThat(TimeseriesLifecycleType.shouldInjectMigrateStepForPhase(phase), is(false)); } { // return false for invalid phase - Phase phase = new Phase(HOT_PHASE + randomAlphaOfLength(5), TimeValue.ZERO, Collections.emptyMap()); + Phase phase = new Phase(HOT_PHASE + randomAlphaOfLength(5), TimeValue.ZERO, Map.of()); assertThat(TimeseriesLifecycleType.shouldInjectMigrateStepForPhase(phase), is(false)); } } @@ -620,7 +612,7 @@ public void testValidatingSearchableSnapshotRepos() { Phase coldPhase = new Phase(HOT_PHASE, TimeValue.ZERO, coldActions); Phase frozenPhase = new Phase(HOT_PHASE, TimeValue.ZERO, frozenActions); - validateAllSearchableSnapshotActionsUseSameRepository(Arrays.asList(hotPhase, coldPhase, frozenPhase)); + validateAllSearchableSnapshotActionsUseSameRepository(List.of(hotPhase, coldPhase, frozenPhase)); } { @@ -634,7 +626,7 @@ public void testValidatingSearchableSnapshotRepos() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> validateAllSearchableSnapshotActionsUseSameRepository(Arrays.asList(hotPhase, coldPhase, frozenPhase)) + () -> validateAllSearchableSnapshotActionsUseSameRepository(List.of(hotPhase, coldPhase, frozenPhase)) ); assertThat( e.getMessage(), @@ -649,25 +641,25 @@ public void testValidatingSearchableSnapshotRepos() { public void testValidatingIncreasingAges() { { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.ZERO, Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, TimeValue.ZERO, Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.ZERO, Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.ZERO, Map.of()); + Phase coldPhase = new 
Phase(COLD_PHASE, TimeValue.ZERO, Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.ZERO, Map.of()); assertFalse( Strings.hasText( - validateMonotonicallyIncreasingPhaseTimings(Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)) + validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)) ) ); } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Map.of()); List phases = new ArrayList<>(); phases.add(hotPhase); @@ -687,15 +679,13 @@ public void testValidatingIncreasingAges() { } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.ZERO, Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueHours(12), Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.ZERO, Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.ZERO, Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueHours(12), Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.ZERO, Map.of()); - String err = validateMonotonicallyIncreasingPhaseTimings( - Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase) - ); + String err = validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)); assertThat( err, @@ -708,15 +698,13 @@ public void testValidatingIncreasingAges() { } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(3), Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, null, Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(3), Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, null, Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(2), Map.of()); - String err = 
validateMonotonicallyIncreasingPhaseTimings( - Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase) - ); + String err = validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)); assertThat( err, @@ -729,15 +717,13 @@ public void testValidatingIncreasingAges() { } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(3), Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, null, Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(1), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(3), Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, null, Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Map.of()); - String err = validateMonotonicallyIncreasingPhaseTimings( - Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase) - ); + String err = validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)); assertThat( err, @@ -750,15 +736,13 @@ public void testValidatingIncreasingAges() { } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(3), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, null, Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(3), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, null, Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Map.of()); - String err = validateMonotonicallyIncreasingPhaseTimings( - Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase) - ); + String err = validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)); assertThat( err, @@ -772,15 +756,13 @@ public void testValidatingIncreasingAges() { } { - Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(3), Collections.emptyMap()); - Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Collections.emptyMap()); - Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Collections.emptyMap()); + Phase hotPhase = new Phase(HOT_PHASE, TimeValue.timeValueDays(3), Map.of()); + Phase warmPhase = new Phase(WARM_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase coldPhase = new Phase(COLD_PHASE, TimeValue.timeValueDays(2), Map.of()); + Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.timeValueDays(2), Map.of()); + 
Phase deletePhase = new Phase(DELETE_PHASE, TimeValue.timeValueDays(1), Map.of()); - String err = validateMonotonicallyIncreasingPhaseTimings( - Arrays.asList(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase) - ); + String err = validateMonotonicallyIncreasingPhaseTimings(List.of(hotPhase, warmPhase, coldPhase, frozenPhase, deletePhase)); assertThat( err, @@ -799,7 +781,7 @@ public void testValidateFrozenPhaseHasSearchableSnapshot() { Map frozenActions = new HashMap<>(); frozenActions.put(SearchableSnapshotAction.NAME, new SearchableSnapshotAction("repo1", randomBoolean())); Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, frozenActions); - validateFrozenPhaseHasSearchableSnapshotAction(Collections.singleton(frozenPhase)); + validateFrozenPhaseHasSearchableSnapshotAction(List.of(frozenPhase)); } { @@ -807,7 +789,7 @@ public void testValidateFrozenPhaseHasSearchableSnapshot() { Phase frozenPhase = new Phase(FROZEN_PHASE, TimeValue.ZERO, frozenActions); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> validateFrozenPhaseHasSearchableSnapshotAction(Collections.singleton(frozenPhase)) + () -> validateFrozenPhaseHasSearchableSnapshotAction(List.of(frozenPhase)) ); assertThat( e.getMessage(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStepTests.java index 71f7ea2925f16..8e40d3af86d81 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStepTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; import org.mockito.Mockito; -import java.util.Collections; +import java.util.Map; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -30,7 +30,7 @@ protected UnfollowFollowerIndexStep newInstance(Step.StepKey key, Step.StepKey n public void testUnFollow() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -51,7 +51,7 @@ public void testUnFollow() throws Exception { public void testRequestNotAcknowledged() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -71,7 +71,7 @@ public void testRequestNotAcknowledged() { public void testUnFollowUnfollowFailed() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -93,7 +93,7 @@ public void testUnFollowUnfollowFailed() { public void testFailureToReleaseRetentionLeases() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") 
.settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStepTests.java index e4bcfd88737f2..3ede4d7668cd0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStepTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.xpack.core.ilm.Step.StepKey; -import java.util.Collections; import java.util.List; import java.util.function.LongSupplier; @@ -68,7 +67,7 @@ public void testPerformAction() { .numberOfReplicas(randomIntBetween(0, 5)) .build(); IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10)) - .putRolloverInfo(new RolloverInfo(alias, Collections.emptyList(), rolloverTime)) + .putRolloverInfo(new RolloverInfo(alias, List.of(), rolloverTime)) .settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) .numberOfShards(randomIntBetween(1, 5)) .numberOfReplicas(randomIntBetween(0, 5)) @@ -88,7 +87,7 @@ public void testPerformActionOnDataStream() { long rolloverTime = randomValueOtherThan(creationDate, () -> randomNonNegativeLong()); String dataStreamName = "test-datastream"; IndexMetadata originalIndexMeta = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 1)) - .putRolloverInfo(new RolloverInfo(dataStreamName, Collections.emptyList(), rolloverTime)) + .putRolloverInfo(new RolloverInfo(dataStreamName, List.of(), rolloverTime)) .settings(settings(IndexVersion.current())) .numberOfShards(randomIntBetween(1, 5)) .numberOfReplicas(randomIntBetween(0, 5)) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsTests.java index e12bae3b92f80..328698254dc76 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsTests.java @@ -125,7 +125,7 @@ public void testResultEvaluatedOnWriteIndexAliasWhenExists() { assertThat( "the rolled index has both the primary and the replica shards started so the condition should be met", - createRandomInstance().isConditionMet(originalIndex.getIndex(), clusterState).isComplete(), + createRandomInstance().isConditionMet(originalIndex.getIndex(), clusterState).complete(), is(true) ); } @@ -163,7 +163,7 @@ public void testResultEvaluatedOnOnlyIndexTheAliasPointsToIfWriteIndexIsNull() { assertThat( "the index the alias is pointing to has both the primary and the replica shards started so the condition should be" + " met", - createRandomInstance().isConditionMet(originalIndex.getIndex(), clusterState).isComplete(), + createRandomInstance().isConditionMet(originalIndex.getIndex(), clusterState).complete(), is(true) ); } @@ -244,13 +244,13 @@ public void testResultEvaluatedOnDataStream() throws IOException { boolean useFailureStore = randomBoolean(); IndexMetadata indexToOperateOn = useFailureStore ? 
failureOriginalIndexMeta : originalIndexMeta; ClusterStateWaitStep.Result result = waitForActiveShardsStep.isConditionMet(indexToOperateOn.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); + assertThat(result.complete(), is(false)); XContentBuilder expected = new WaitForActiveShardsStep.ActiveShardsInfo(2, "3", false).toXContent( JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS ); - String actualResultAsString = Strings.toString(result.getInformationContext()); + String actualResultAsString = Strings.toString(result.informationContext()); assertThat(actualResultAsString, is(Strings.toString(expected))); assertThat(actualResultAsString, containsString("waiting for [3] shards to become active, but only [2] are active")); } @@ -288,13 +288,13 @@ public void testResultReportsMeaningfulMessage() throws IOException { .build(); ClusterStateWaitStep.Result result = createRandomInstance().isConditionMet(originalIndex.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); + assertThat(result.complete(), is(false)); XContentBuilder expected = new WaitForActiveShardsStep.ActiveShardsInfo(2, "3", false).toXContent( JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS ); - String actualResultAsString = Strings.toString(result.getInformationContext()); + String actualResultAsString = Strings.toString(result.informationContext()); assertThat(actualResultAsString, is(Strings.toString(expected))); assertThat(actualResultAsString, containsString("waiting for [3] shards to become active, but only [2] are active")); } @@ -316,9 +316,9 @@ public void testResultReportsErrorMessage() { WaitForActiveShardsStep step = createRandomInstance(); ClusterStateWaitStep.Result result = step.isConditionMet(new Index("index-000000", UUID.randomUUID().toString()), clusterState); - assertThat(result.isComplete(), is(false)); + assertThat(result.complete(), is(false)); - String actualResultAsString = Strings.toString(result.getInformationContext()); + String actualResultAsString = Strings.toString(result.informationContext()); assertThat( actualResultAsString, containsString( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java index 3247c02cd9bac..2635e14b52eb4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java @@ -64,9 +64,7 @@ protected WaitForDataTierStep copyInstance(WaitForDataTierStep instance) { public void testConditionMet() { String notIncludedTier = randomFrom(DataTier.ALL_DATA_TIERS); - List otherTiers = DataTier.ALL_DATA_TIERS.stream() - .filter(tier -> notIncludedTier.equals(tier) == false) - .collect(Collectors.toList()); + List otherTiers = DataTier.ALL_DATA_TIERS.stream().filter(tier -> notIncludedTier.equals(tier) == false).toList(); List includedTiers = randomSubsetOf(between(1, otherTiers.size()), otherTiers); String tierPreference = String.join(",", includedTiers); WaitForDataTierStep step = new WaitForDataTierStep(randomStepKey(), randomStepKey(), tierPreference); @@ -79,11 +77,11 @@ public void testConditionMet() { private void verify(WaitForDataTierStep step, ClusterState state, boolean complete, String message) { ClusterStateWaitStep.Result result = step.isConditionMet(null, state); - assertThat(result.isComplete(), is(complete)); + 
assertThat(result.complete(), is(complete)); if (message != null) { - assertThat(Strings.toString(result.getInformationContext()), containsString(message)); + assertThat(Strings.toString(result.informationContext()), containsString(message)); } else { - assertThat(result.getInformationContext(), is(nullValue())); + assertThat(result.informationContext(), is(nullValue())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepInfoTests.java index 62c12e272ef59..0e5323d51f155 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepInfoTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepInfoTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ilm.WaitForFollowShardTasksStep.Info; -import org.elasticsearch.xpack.core.ilm.WaitForFollowShardTasksStep.Info.ShardFollowTaskInfo; +import org.elasticsearch.xpack.core.ilm.WaitForFollowShardTasksStep.ShardFollowTaskInfo; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepTests.java index 162f0ec3361b4..ba94508667776 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForFollowShardTasksStepTests.java @@ -16,9 +16,9 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import org.mockito.Mockito; -import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -57,11 +57,11 @@ protected WaitForFollowShardTasksStep copyInstance(WaitForFollowShardTasksStep i public void testConditionMet() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(2) .numberOfReplicas(0) .build(); - List statsResponses = Arrays.asList( + List statsResponses = List.of( new FollowStatsAction.StatsResponse(createShardFollowTaskStatus(0, 9, 9)), new FollowStatsAction.StatsResponse(createShardFollowTaskStatus(1, 3, 3)) ); @@ -96,11 +96,11 @@ public void onFailure(Exception e) { public void testConditionNotMetShardsNotInSync() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(2) .numberOfReplicas(0) .build(); - List statsResponses = Arrays.asList( + List statsResponses = List.of( new FollowStatsAction.StatsResponse(createShardFollowTaskStatus(0, 9, 9)), new FollowStatsAction.StatsResponse(createShardFollowTaskStatus(1, 8, 3)) ); @@ -131,10 +131,10 @@ public void onFailure(Exception e) { 
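// Reviewer note on the Collections.singletonMap/Collections.emptyList -> Map.of/List.of
// migration running through these test files: the java.util factory methods are drop-in
// replacements at every converted call site, but they are stricter than what they replace --
// Map.of and List.of reject null keys, values, and elements, and the returned collections
// are unmodifiable. A minimal, self-contained sketch of the difference (plain JDK behavior,
// nothing specific to this diff; the class name below is illustrative only):
import java.util.Arrays;
import java.util.List;
import java.util.Map;

class FactoryMethodSemanticsSketch {
    static void sketch() {
        List<String> tolerant = Arrays.asList("x", null); // asList permits null elements
        Map<String, String> strictMap = Map.of("k", "v"); // unmodifiable; put() throws UnsupportedOperationException
        List<String> strictList = List.of("x");           // List.of("x", null) would throw NullPointerException
    }
}
// None of the converted call sites in these hunks pass nulls or mutate the result, so the
// swap is behavior-preserving.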
assertThat(informationContextHolder[0], notNullValue()); assertThat(exceptionHolder[0], nullValue()); WaitForFollowShardTasksStep.Info info = (WaitForFollowShardTasksStep.Info) informationContextHolder[0]; - assertThat(info.getShardFollowTaskInfos().size(), equalTo(1)); - assertThat(info.getShardFollowTaskInfos().get(0).getShardId(), equalTo(1)); - assertThat(info.getShardFollowTaskInfos().get(0).getLeaderGlobalCheckpoint(), equalTo(8L)); - assertThat(info.getShardFollowTaskInfos().get(0).getFollowerGlobalCheckpoint(), equalTo(3L)); + assertThat(info.shardFollowTaskInfos().size(), equalTo(1)); + assertThat(info.shardFollowTaskInfos().get(0).shardId(), equalTo(1)); + assertThat(info.shardFollowTaskInfos().get(0).leaderGlobalCheckpoint(), equalTo(8L)); + assertThat(info.shardFollowTaskInfos().get(0).followerGlobalCheckpoint(), equalTo(3L)); } public void testConditionNotMetNotAFollowerIndex() { @@ -214,7 +214,7 @@ private void mockFollowStatsCall(String expectedIndexName, List<FollowStatsAction.StatsResponse> statsResponses) { @SuppressWarnings("unchecked") ActionListener<FollowStatsAction.StatsResponses> listener = (ActionListener<FollowStatsAction.StatsResponses>) invocationOnMock .getArguments()[2]; - listener.onResponse(new FollowStatsAction.StatsResponses(Collections.emptyList(), Collections.emptyList(), statsResponses)); + listener.onResponse(new FollowStatsAction.StatsResponses(List.of(), List.of(), statsResponses)); return null; }).when(client).execute(Mockito.eq(FollowStatsAction.INSTANCE), Mockito.any(), Mockito.any()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java index 0ae7b02c7400a..1414788f3ff98 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java @@ -93,8 +93,8 @@ public void testConditionMetForGreen() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), nullValue()); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), nullValue()); } public void testConditionNotMetForGreen() { @@ -119,10 +119,10 @@ public void testConditionNotMetForGreen() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); + assertThat(result.complete(), is(false)); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.informationContext(); assertThat(info, notNullValue()); - assertThat(info.getMessage(), equalTo("index is not green; not all shards are active")); + assertThat(info.message(), equalTo("index is not green; not all shards are active")); } public void testConditionNotMetNoIndexRoutingTable() { @@ -139,10 +139,10 @@ public void testConditionNotMetNoIndexRoutingTable() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo
info = (SingleMessageFieldInfo) result.getInformationContext(); + assertThat(result.complete(), is(false)); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.informationContext(); assertThat(info, notNullValue()); - assertThat(info.getMessage(), equalTo("index is red; no indexRoutingTable")); + assertThat(info.message(), equalTo("index is red; no indexRoutingTable")); } public void testConditionMetForYellow() { @@ -167,8 +167,8 @@ public void testConditionMetForYellow() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), nullValue()); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), nullValue()); } public void testConditionNotMetForYellow() { @@ -193,10 +193,10 @@ public void testConditionNotMetForYellow() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); + assertThat(result.complete(), is(false)); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.informationContext(); assertThat(info, notNullValue()); - assertThat(info.getMessage(), equalTo("index is red; not all primary shards are active")); + assertThat(info.message(), equalTo("index is red; not all primary shards are active")); } public void testConditionNotMetNoIndexRoutingTableForYellow() { @@ -213,10 +213,10 @@ public void testConditionNotMetNoIndexRoutingTableForYellow() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); + assertThat(result.complete(), is(false)); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.informationContext(); assertThat(info, notNullValue()); - assertThat(info.getMessage(), equalTo("index is red; no indexRoutingTable")); + assertThat(info.message(), equalTo("index is red; no indexRoutingTable")); } public void testStepReturnsFalseIfTargetIndexIsMissing() { @@ -243,11 +243,11 @@ public void testStepReturnsFalseIfTargetIndexIsMissing() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN, indexPrefix); ClusterStateWaitStep.Result result = step.isConditionMet(originalIndex.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); + assertThat(result.complete(), is(false)); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.informationContext(); String targetIndex = indexPrefix + originalIndex.getIndex().getName(); assertThat( - info.getMessage(), + info.message(), is( "[" + step.getKey().action() @@ -303,9 +303,9 @@ public void testStepWaitsForTargetIndexHealthWhenPrefixConfigured() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN); 
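// Reviewer note on the accessor renames in these hunks (isComplete() -> complete(),
// getInformationContext() -> informationContext(), getMessage() -> message()): the pattern
// is consistent with ClusterStateWaitStep.Result and SingleMessageFieldInfo having been
// converted to Java records, whose canonical accessors drop the bean-style get/is prefixes.
// A hypothetical, simplified sketch of the shape implied by the call sites (field types
// narrowed for brevity; the real declarations are not part of this diff):
record Result(boolean complete, Object informationContext) {}
record SingleMessageFieldInfo(String message) {}
// Call sites then read result.complete() and info.message(), semantically identical to the
// old getter calls.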
ClusterStateWaitStep.Result result = step.isConditionMet(originalIndex.getIndex(), clusterTargetInitializing); - assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); - assertThat(info.getMessage(), is("index is not green; not all shards are active")); + assertThat(result.complete(), is(false)); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.informationContext(); + assertThat(info.message(), is("index is not green; not all shards are active")); } { @@ -326,8 +326,8 @@ public void testStepWaitsForTargetIndexHealthWhenPrefixConfigured() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN); ClusterStateWaitStep.Result result = step.isConditionMet(originalIndex.getIndex(), clusterTargetInitializing); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), nullValue()); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), nullValue()); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java index ad5e4c9533c99..a0982e72b11af 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.xpack.core.ilm.Step.StepKey; -import java.util.Collections; +import java.util.Map; import static org.elasticsearch.xpack.core.ilm.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -54,7 +54,7 @@ protected WaitForIndexingCompleteStep copyInstance(WaitForIndexingCompleteStep i public void testConditionMet() { IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -65,8 +65,8 @@ public void testConditionMet() { WaitForIndexingCompleteStep step = createRandomInstance(); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), nullValue()); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), nullValue()); } public void testConditionMetNotAFollowerIndex() { @@ -82,8 +82,8 @@ public void testConditionMetNotAFollowerIndex() { WaitForIndexingCompleteStep step = createRandomInstance(); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(true)); - assertThat(result.getInformationContext(), nullValue()); + assertThat(result.complete(), is(true)); + assertThat(result.informationContext(), nullValue()); } public void testConditionNotMet() { @@ -93,7 +93,7 @@ public void testConditionNotMet() { } IndexMetadata indexMetadata = IndexMetadata.builder("follower-index") .settings(indexSettings) - .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .putCustom(CCR_METADATA_KEY, Map.of()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -104,10 +104,10 
@@ public void testConditionNotMet() { WaitForIndexingCompleteStep step = createRandomInstance(); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); - assertThat(result.isComplete(), is(false)); - assertThat(result.getInformationContext(), notNullValue()); + assertThat(result.complete(), is(false)); + assertThat(result.informationContext(), notNullValue()); WaitForIndexingCompleteStep.IndexingNotCompleteInfo info = (WaitForIndexingCompleteStep.IndexingNotCompleteInfo) result - .getInformationContext(); + .informationContext(); assertThat( info.getMessage(), equalTo( @@ -122,7 +122,7 @@ public void testIndexDeleted() { WaitForIndexingCompleteStep step = createRandomInstance(); ClusterStateWaitStep.Result result = step.isConditionMet(new Index("this-index-doesnt-exist", "uuid"), clusterState); - assertThat(result.isComplete(), is(false)); - assertThat(result.getInformationContext(), nullValue()); + assertThat(result.complete(), is(false)); + assertThat(result.informationContext(), nullValue()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java index 0264f7b09c6fd..db0c2957b3ccb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java @@ -38,7 +38,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mockito; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -396,11 +395,7 @@ public void testEvaluateDoesntTriggerRolloverForIndexManuallyRolledOnLifecycleRo .putAlias(AliasMetadata.builder(rolloverAlias)) .settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias)) .putRolloverInfo( - new RolloverInfo( - rolloverAlias, - Collections.singletonList(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), - System.currentTimeMillis() - ) + new RolloverInfo(rolloverAlias, List.of(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), System.currentTimeMillis()) ) .numberOfShards(randomIntBetween(1, 5)) .numberOfReplicas(randomIntBetween(0, 5)) @@ -432,7 +427,7 @@ public void testEvaluateTriggersRolloverForIndexManuallyRolledOnDifferentAlias() .putRolloverInfo( new RolloverInfo( randomAlphaOfLength(5), - Collections.singletonList(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), + List.of(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), System.currentTimeMillis() ) ) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java index 8ca6c0016a791..15bbbe7446429 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java @@ -30,7 +30,7 @@ public class WaitUntilTimeSeriesEndTimePassesStepTests extends AbstractStepTestC protected WaitUntilTimeSeriesEndTimePassesStep createRandomInstance() { Step.StepKey stepKey = randomStepKey(); Step.StepKey nextStepKey = randomStepKey(); - return new WaitUntilTimeSeriesEndTimePassesStep(stepKey, nextStepKey, Instant::now, client); + return 
new WaitUntilTimeSeriesEndTimePassesStep(stepKey, nextStepKey, Instant::now); } @Override @@ -42,12 +42,12 @@ protected WaitUntilTimeSeriesEndTimePassesStep mutateInstance(WaitUntilTimeSerie case 0 -> key = new Step.StepKey(key.phase(), key.action(), key.name() + randomAlphaOfLength(5)); case 1 -> nextKey = new Step.StepKey(nextKey.phase(), nextKey.action(), nextKey.name() + randomAlphaOfLength(5)); } - return new WaitUntilTimeSeriesEndTimePassesStep(key, nextKey, Instant::now, client); + return new WaitUntilTimeSeriesEndTimePassesStep(key, nextKey, Instant::now); } @Override protected WaitUntilTimeSeriesEndTimePassesStep copyInstance(WaitUntilTimeSeriesEndTimePassesStep instance) { - return new WaitUntilTimeSeriesEndTimePassesStep(instance.getKey(), instance.getNextStepKey(), Instant::now, client); + return new WaitUntilTimeSeriesEndTimePassesStep(instance.getKey(), instance.getNextStepKey(), Instant::now); } public void testEvaluateCondition() { @@ -68,8 +68,7 @@ public void testEvaluateCondition() { WaitUntilTimeSeriesEndTimePassesStep step = new WaitUntilTimeSeriesEndTimePassesStep( randomStepKey(), randomStepKey(), - () -> currentTime, - client + () -> currentTime ); { // end_time has lapsed already so condition must be met diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleResponseTests.java index 05c637a3a66c9..1dc8b24c3231d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleResponseTests.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Map; @@ -90,7 +89,7 @@ protected Writeable.Reader instanceReader() { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList( + List.of( new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new), new NamedWriteableRegistry.Entry(LifecycleType.class, TestLifecycleType.TYPE, in -> TestLifecycleType.INSTANCE) ) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java index feb5ca24a021d..b87a4e41258b8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java @@ -34,7 +34,6 @@ import org.junit.Before; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; public class PutLifecycleRequestTests extends AbstractXContentSerializingTestCase { @@ -78,7 +77,7 @@ public String getPolicyName() { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Arrays.asList( + List.of( new NamedWriteableRegistry.Entry( LifecycleType.class, TimeseriesLifecycleType.TYPE, @@ -105,7 +104,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { protected NamedXContentRegistry xContentRegistry() { List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); entries.addAll( - Arrays.asList( + List.of( new NamedXContentRegistry.Entry( LifecycleType.class, new 
ParseField(TimeseriesLifecycleType.TYPE), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyResponseTests.java index 76f4d732f4ae7..44fed3d4b488b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyResponseTests.java @@ -14,15 +14,13 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.List; public class RemoveIndexLifecyclePolicyResponseTests extends AbstractXContentSerializingTestCase { @Override protected Response createTestInstance() { - List failedIndexes = Arrays.asList(generateRandomStringArray(20, 20, false)); + List failedIndexes = List.of(generateRandomStringArray(20, 20, false)); return new Response(failedIndexes); } @@ -35,7 +33,7 @@ protected Writeable.Reader instanceReader() { protected Response mutateInstance(Response instance) { List failedIndices = randomValueOtherThan( instance.getFailedIndexes(), - () -> Arrays.asList(generateRandomStringArray(20, 20, false)) + () -> List.of(generateRandomStringArray(20, 20, false)) ); return new Response(failedIndices); } @@ -53,7 +51,7 @@ public void testNullFailedIndices() { public void testHasFailures() { Response response = new Response(new ArrayList<>()); assertFalse(response.hasFailures()); - assertEquals(Collections.emptyList(), response.getFailedIndexes()); + assertEquals(List.of(), response.getFailedIndexes()); int size = randomIntBetween(1, 10); List failedIndexes = new ArrayList<>(size); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/step/info/AllocationRoutedStepInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/step/info/AllocationRoutedStepInfoTests.java index 67214868293ea..0e6903ba6cf44 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/step/info/AllocationRoutedStepInfoTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/step/info/AllocationRoutedStepInfoTests.java @@ -38,18 +38,18 @@ public final void testEqualsAndHashcode() { protected final AllocationInfo copyInstance(AllocationInfo instance) { return new AllocationInfo( - instance.getNumberOfReplicas(), - instance.getNumberShardsLeftToAllocate(), + instance.numberOfReplicas(), + instance.numberShardsLeftToAllocate(), instance.allShardsActive(), - instance.getMessage() + instance.message() ); } protected AllocationInfo mutateInstance(AllocationInfo instance) throws IOException { - long actualReplicas = instance.getNumberOfReplicas(); - long shardsToAllocate = instance.getNumberShardsLeftToAllocate(); + long actualReplicas = instance.numberOfReplicas(); + long shardsToAllocate = instance.numberShardsLeftToAllocate(); boolean allShardsActive = instance.allShardsActive(); - var message = instance.getMessage(); + var message = instance.message(); switch (between(0, 2)) { case 0 -> shardsToAllocate += between(1, 20); case 1 -> allShardsActive = allShardsActive == false; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/InferenceActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/InferenceActionRequestTests.java index 
a9ca5e6da8720..01c0ff88be222 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/InferenceActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/InferenceActionRequestTests.java @@ -41,8 +41,7 @@ protected InferenceAction.Request createTestInstance() { return new InferenceAction.Request( randomFrom(TaskType.values()), randomAlphaOfLength(6), - // null, - randomNullOrAlphaOfLength(10), + randomAlphaOfLengthOrNull(10), randomList(1, 5, () -> randomAlphaOfLength(8)), randomMap(0, 3, () -> new Tuple<>(randomAlphaOfLength(4), randomAlphaOfLength(4))), randomFrom(InputType.values()), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionActionRequestTests.java new file mode 100644 index 0000000000000..1872ac3caa230 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionActionRequestTests.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.inference.action; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.is; + +public class UnifiedCompletionActionRequestTests extends AbstractBWCWireSerializationTestCase { + + public void testValidation_ReturnsException_When_UnifiedCompletionRequestMessage_Is_Null() { + var request = new UnifiedCompletionAction.Request( + "inference_id", + TaskType.COMPLETION, + UnifiedCompletionRequest.of(null), + TimeValue.timeValueSeconds(10) + ); + var exception = request.validate(); + assertThat(exception.getMessage(), is("Validation Failed: 1: Field [messages] cannot be null;")); + } + + public void testValidation_ReturnsException_When_UnifiedCompletionRequest_Is_EmptyArray() { + var request = new UnifiedCompletionAction.Request( + "inference_id", + TaskType.COMPLETION, + UnifiedCompletionRequest.of(List.of()), + TimeValue.timeValueSeconds(10) + ); + var exception = request.validate(); + assertThat(exception.getMessage(), is("Validation Failed: 1: Field [messages] cannot be an empty array;")); + } + + public void testValidation_ReturnsException_When_TaskType_IsNot_Completion() { + var request = new UnifiedCompletionAction.Request( + "inference_id", + TaskType.SPARSE_EMBEDDING, + UnifiedCompletionRequest.of(List.of(UnifiedCompletionRequestTests.randomMessage())), + TimeValue.timeValueSeconds(10) + ); + var exception = request.validate(); + assertThat(exception.getMessage(), is("Validation Failed: 1: Field [taskType] must be [completion];")); + } + + public void testValidation_ReturnsNull_When_TaskType_IsAny() { + var request = new UnifiedCompletionAction.Request( + "inference_id", + TaskType.ANY, + 
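+ // Unlike the SPARSE_EMBEDDING case above, TaskType.ANY passes the
+ // task-type check, so validate() is expected to return null here.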
UnifiedCompletionRequest.of(List.of(UnifiedCompletionRequestTests.randomMessage())), + TimeValue.timeValueSeconds(10) + ); + assertNull(request.validate()); + } + + @Override + protected UnifiedCompletionAction.Request mutateInstanceForVersion(UnifiedCompletionAction.Request instance, TransportVersion version) { + return instance; + } + + @Override + protected Writeable.Reader instanceReader() { + return UnifiedCompletionAction.Request::new; + } + + @Override + protected UnifiedCompletionAction.Request createTestInstance() { + return new UnifiedCompletionAction.Request( + randomAlphaOfLength(10), + randomFrom(TaskType.values()), + UnifiedCompletionRequestTests.randomUnifiedCompletionRequest(), + TimeValue.timeValueMillis(randomLongBetween(1, 2048)) + ); + } + + @Override + protected UnifiedCompletionAction.Request mutateInstance(UnifiedCompletionAction.Request instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(UnifiedCompletionRequest.getNamedWriteables()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionRequestTests.java new file mode 100644 index 0000000000000..47a0814a584b7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/UnifiedCompletionRequestTests.java @@ -0,0 +1,293 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.inference.action; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.inference.UnifiedCompletionRequest; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class UnifiedCompletionRequestTests extends AbstractBWCWireSerializationTestCase { + + public void testParseAllFields() throws IOException { + String requestJson = """ + { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": [ + { + "text": "some text", + "type": "string" + } + ], + "name": "a name", + "tool_call_id": "100", + "tool_calls": [ + { + "id": "call_62136354", + "type": "function", + "function": { + "arguments": "{'order_id': 'order_12345'}", + "name": "get_delivery_date" + } + } + ] + } + ], + "max_completion_tokens": 100, + "stop": ["stop"], + "temperature": 0.1, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object" + } + } + } + ], + "tool_choice": { + "type": "function", + "function": { + "name": "some function" + } + }, + "top_p": 0.2 + } + """; + + try (var parser = createParser(JsonXContent.jsonXContent, requestJson)) { + var request = UnifiedCompletionRequest.PARSER.apply(parser, null); + var expected = new UnifiedCompletionRequest( + List.of( + new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentObjects( + List.of(new UnifiedCompletionRequest.ContentObject("some text", "string")) + ), + "user", + "a name", + "100", + List.of( + new UnifiedCompletionRequest.ToolCall( + "call_62136354", + new UnifiedCompletionRequest.ToolCall.FunctionField("{'order_id': 'order_12345'}", "get_delivery_date"), + "function" + ) + ) + ) + ), + "gpt-4o", + 100L, + List.of("stop"), + 0.1F, + new UnifiedCompletionRequest.ToolChoiceObject( + "function", + new UnifiedCompletionRequest.ToolChoiceObject.FunctionField("some function") + ), + List.of( + new UnifiedCompletionRequest.Tool( + "function", + new UnifiedCompletionRequest.Tool.FunctionField( + "Get the current weather in a given location", + "get_current_weather", + Map.of("type", "object"), + null + ) + ) + ), + 0.2F + ); + + assertThat(request, is(expected)); + } + } + + public void testParsing() throws IOException { + String requestJson = """ + { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "What is the weather like in Boston today?" 
+ } + ], + "stop": "none", + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object" + } + } + } + ], + "tool_choice": "auto" + } + """; + + try (var parser = createParser(JsonXContent.jsonXContent, requestJson)) { + var request = UnifiedCompletionRequest.PARSER.apply(parser, null); + var expected = new UnifiedCompletionRequest( + List.of( + new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString("What is the weather like in Boston today?"), + "user", + null, + null, + null + ) + ), + "gpt-4o", + null, + List.of("none"), + null, + new UnifiedCompletionRequest.ToolChoiceString("auto"), + List.of( + new UnifiedCompletionRequest.Tool( + "function", + new UnifiedCompletionRequest.Tool.FunctionField( + "Get the current weather in a given location", + "get_current_weather", + Map.of("type", "object"), + null + ) + ) + ), + null + ); + + assertThat(request, is(expected)); + } + } + + public static UnifiedCompletionRequest randomUnifiedCompletionRequest() { + return new UnifiedCompletionRequest( + randomList(5, UnifiedCompletionRequestTests::randomMessage), + randomAlphaOfLengthOrNull(10), + randomPositiveLongOrNull(), + randomStopOrNull(), + randomFloatOrNull(), + randomToolChoiceOrNull(), + randomToolListOrNull(), + randomFloatOrNull() + ); + } + + public static UnifiedCompletionRequest.Message randomMessage() { + return new UnifiedCompletionRequest.Message( + randomContent(), + randomAlphaOfLength(10), + randomAlphaOfLengthOrNull(10), + randomAlphaOfLengthOrNull(10), + randomToolCallListOrNull() + ); + } + + public static UnifiedCompletionRequest.Content randomContent() { + return randomBoolean() + ? new UnifiedCompletionRequest.ContentString(randomAlphaOfLength(10)) + : new UnifiedCompletionRequest.ContentObjects(randomList(10, UnifiedCompletionRequestTests::randomContentObject)); + } + + public static UnifiedCompletionRequest.ContentObject randomContentObject() { + return new UnifiedCompletionRequest.ContentObject(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + public static List randomToolCallListOrNull() { + return randomBoolean() ? randomList(10, UnifiedCompletionRequestTests::randomToolCall) : null; + } + + public static UnifiedCompletionRequest.ToolCall randomToolCall() { + return new UnifiedCompletionRequest.ToolCall(randomAlphaOfLength(10), randomToolCallFunctionField(), randomAlphaOfLength(10)); + } + + public static UnifiedCompletionRequest.ToolCall.FunctionField randomToolCallFunctionField() { + return new UnifiedCompletionRequest.ToolCall.FunctionField(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + public static List randomStopOrNull() { + return randomBoolean() ? randomStop() : null; + } + + public static List randomStop() { + return randomList(5, () -> randomAlphaOfLength(10)); + } + + public static UnifiedCompletionRequest.ToolChoice randomToolChoiceOrNull() { + return randomBoolean() ? randomToolChoice() : null; + } + + public static UnifiedCompletionRequest.ToolChoice randomToolChoice() { + return randomBoolean() + ? 
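+ // tool_choice is polymorphic on the wire (either a bare string such as
+ // "auto", or an object naming a specific function), as the two parsing
+ // tests above show; so the generator picks randomly between the
+ // ToolChoiceString and ToolChoiceObject variants.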
new UnifiedCompletionRequest.ToolChoiceString(randomAlphaOfLength(10)) + : new UnifiedCompletionRequest.ToolChoiceObject(randomAlphaOfLength(10), randomToolChoiceObjectFunctionField()); + } + + public static UnifiedCompletionRequest.ToolChoiceObject.FunctionField randomToolChoiceObjectFunctionField() { + return new UnifiedCompletionRequest.ToolChoiceObject.FunctionField(randomAlphaOfLength(10)); + } + + public static List randomToolListOrNull() { + return randomBoolean() ? randomList(10, UnifiedCompletionRequestTests::randomTool) : null; + } + + public static UnifiedCompletionRequest.Tool randomTool() { + return new UnifiedCompletionRequest.Tool(randomAlphaOfLength(10), randomToolFunctionField()); + } + + public static UnifiedCompletionRequest.Tool.FunctionField randomToolFunctionField() { + return new UnifiedCompletionRequest.Tool.FunctionField( + randomAlphaOfLengthOrNull(10), + randomAlphaOfLength(10), + null, + randomOptionalBoolean() + ); + } + + @Override + protected UnifiedCompletionRequest mutateInstanceForVersion(UnifiedCompletionRequest instance, TransportVersion version) { + return instance; + } + + @Override + protected Writeable.Reader instanceReader() { + return UnifiedCompletionRequest::new; + } + + @Override + protected UnifiedCompletionRequest createTestInstance() { + return randomUnifiedCompletionRequest(); + } + + @Override + protected UnifiedCompletionRequest mutateInstance(UnifiedCompletionRequest instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(UnifiedCompletionRequest.getNamedWriteables()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/StreamingUnifiedChatCompletionResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/StreamingUnifiedChatCompletionResultsTests.java new file mode 100644 index 0000000000000..a8f569dbef9d1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/StreamingUnifiedChatCompletionResultsTests.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ * + * this file was contributed to by a generative AI + */ + +package org.elasticsearch.xpack.core.inference.results; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.List; + +public class StreamingUnifiedChatCompletionResultsTests extends ESTestCase { + + public void testResults_toXContentChunked() throws IOException { + String expected = """ + { + "id": "chunk1", + "choices": [ + { + "delta": { + "content": "example_content", + "refusal": "example_refusal", + "role": "assistant", + "tool_calls": [ + { + "index": 1, + "id": "tool1", + "function": { + "arguments": "example_arguments", + "name": "example_function" + }, + "type": "function" + } + ] + }, + "finish_reason": "example_reason", + "index": 0 + } + ], + "model": "example_model", + "object": "example_object", + "usage": { + "completion_tokens": 10, + "prompt_tokens": 5, + "total_tokens": 15 + } + } + """; + + StreamingUnifiedChatCompletionResults.ChatCompletionChunk chunk = new StreamingUnifiedChatCompletionResults.ChatCompletionChunk( + "chunk1", + List.of( + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice( + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta( + "example_content", + "example_refusal", + "assistant", + List.of( + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall( + 1, + "tool1", + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall.Function( + "example_arguments", + "example_function" + ), + "function" + ) + ) + ), + "example_reason", + 0 + ) + ), + "example_model", + "example_object", + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Usage(10, 5, 15) + ); + + Deque deque = new ArrayDeque<>(); + deque.add(chunk); + StreamingUnifiedChatCompletionResults.Results results = new StreamingUnifiedChatCompletionResults.Results(deque); + XContentBuilder builder = JsonXContent.contentBuilder(); + results.toXContentChunked(null).forEachRemaining(xContent -> { + try { + xContent.toXContent(builder, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + assertEquals(expected.replaceAll("\\s+", ""), Strings.toString(builder.prettyPrint()).trim()); + } + + public void testChoiceToXContentChunked() throws IOException { + String expected = """ + { + "delta": { + "content": "example_content", + "refusal": "example_refusal", + "role": "assistant", + "tool_calls": [ + { + "index": 1, + "id": "tool1", + "function": { + "arguments": "example_arguments", + "name": "example_function" + }, + "type": "function" + } + ] + }, + "finish_reason": "example_reason", + "index": 0 + } + """; + + StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice choice = + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice( + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta( + "example_content", + "example_refusal", + "assistant", + List.of( + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall( + 1, + "tool1", + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall.Function( + "example_arguments", + "example_function" + ), + "function" + ) + ) + ), + "example_reason", + 0 + ); + + XContentBuilder builder = JsonXContent.contentBuilder(); + 
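+ // toXContentChunked() exposes the object as an iterator of small ToXContent
+ // fragments; draining them all into a single builder reassembles the complete
+ // JSON so it can be compared against the expected string as a whole.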
choice.toXContentChunked(null).forEachRemaining(xContent -> { + try { + xContent.toXContent(builder, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + assertEquals(expected.replaceAll("\\s+", ""), Strings.toString(builder.prettyPrint()).trim()); + } + + public void testToolCallToXContentChunked() throws IOException { + String expected = """ + { + "index": 1, + "id": "tool1", + "function": { + "arguments": "example_arguments", + "name": "example_function" + }, + "type": "function" + } + """; + + StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall toolCall = + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall( + 1, + "tool1", + new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall.Function( + "example_arguments", + "example_function" + ), + "function" + ); + + XContentBuilder builder = JsonXContent.contentBuilder(); + toolCall.toXContentChunked(null).forEachRemaining(xContent -> { + try { + xContent.toXContent(builder, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + assertEquals(expected.replaceAll("\\s+", ""), Strings.toString(builder.prettyPrint()).trim()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 58248121ddbad..c1cd1905c3a17 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -4236,6 +4236,7 @@ public void testInferenceUserRole() { assertTrue(role.cluster().check("cluster:monitor/xpack/inference", request, authentication)); assertTrue(role.cluster().check("cluster:monitor/xpack/inference/get", request, authentication)); assertFalse(role.cluster().check("cluster:admin/xpack/inference/put", request, authentication)); + assertTrue(role.cluster().check("cluster:monitor/xpack/inference/unified", request, authentication)); assertFalse(role.cluster().check("cluster:admin/xpack/inference/delete", request, authentication)); assertTrue(role.cluster().check("cluster:monitor/xpack/ml/trained_models/deployment/infer", request, authentication)); assertFalse(role.cluster().check("cluster:admin/xpack/ml/trained_models/deployment/start", request, authentication)); diff --git a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java index 6d95038e2cbcc..54a48ab34e991 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java +++ b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java @@ -63,7 +63,7 @@ public void testMlDeprecationChecks() throws Exception { indexDoc( ".ml-anomalies-.write-" + jobId, jobId + "_model_snapshot_2", - "{\"job_id\":\"deprecation_check_job\",\"snapshot_id\":\"2\",\"snapshot_doc_count\":1,\"min_version\":\"8.0.0\"}" + "{\"job_id\":\"deprecation_check_job\",\"snapshot_id\":\"2\",\"snapshot_doc_count\":1,\"min_version\":\"8.3.0\"}" ); client().performRequest(new Request("POST", "/.ml-anomalies-*/_refresh")); diff --git 
a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java index 87d0bfb93e18c..7ad0758d99832 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java @@ -366,7 +366,7 @@ private static ClusterState removeSkippedSettings(ClusterState state, String[] i public static class Request extends MasterNodeReadRequest implements IndicesRequest.Replaceable { - private static final IndicesOptions INDICES_OPTIONS = IndicesOptions.fromOptions(false, true, true, true); + private static final IndicesOptions INDICES_OPTIONS = IndicesOptions.fromOptions(false, true, true, true, true); private String[] indices; public Request(TimeValue masterNodeTimeout, String... indices) { diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml index b0f850d09f76d..094d9cbf43089 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml @@ -58,7 +58,7 @@ setup: connector.put: connector_id: test-connector-native body: - index_name: search-test + index_name: content-search-test is_native: true - match: { result: 'created' } @@ -68,7 +68,7 @@ setup: connector_id: test-connector-native - match: { id: test-connector-native } - - match: { index_name: search-test } + - match: { index_name: content-search-test } - match: { is_native: true } - match: { sync_now: false } - match: { status: needs_configuration } @@ -151,6 +151,7 @@ setup: is_native: false service_type: super-connector + --- 'Create Connector - Id returned as part of response': - do: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml index 4ffa5435a3d7b..f804dc02a9e01 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml @@ -125,3 +125,29 @@ setup: connector_id: test-connector - match: { index_name: search-1-test } + + +--- +"Update Managed Connector Index Name": + - do: + connector.put: + connector_id: test-connector-1 + body: + is_native: true + service_type: super-connector + + - do: + connector.update_index_name: + connector_id: test-connector-1 + body: + index_name: content-search-2-test + + + - match: { result: updated } + + - do: + connector.get: + connector_id: test-connector-1 + + - match: { index_name: content-search-2-test } + diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml 
b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml index 77c57532ad479..f8cd24d175312 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml @@ -7,7 +7,7 @@ setup: connector.put: connector_id: test-connector body: - index_name: search-1-test + index_name: content-search-1-test name: my-connector language: pl is_native: false @@ -29,7 +29,6 @@ setup: connector_id: test-connector - match: { is_native: true } - - match: { status: configured } - do: connector.update_native: @@ -44,7 +43,6 @@ setup: connector_id: test-connector - match: { is_native: false } - - match: { status: configured } --- "Update Connector Native - 404 when connector doesn't exist": diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml index 1cbff6a35e18b..634f99cd53fde 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml @@ -71,7 +71,7 @@ setup: - do: connector.post: body: - index_name: search-test + index_name: content-search-test is_native: true - set: { id: id } @@ -82,7 +82,7 @@ setup: connector_id: $id - match: { id: $id } - - match: { index_name: search-test } + - match: { index_name: content-search-test } - match: { is_native: true } - match: { sync_now: false } - match: { status: needs_configuration } @@ -102,6 +102,7 @@ setup: is_native: false service_type: super-connector + --- 'Create Connector - Index name used by another connector': - do: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml index 10e4620ca5603..697b0ee419181 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/20_connector_list.yml @@ -26,7 +26,7 @@ setup: connector.put: connector_id: connector-b body: - index_name: search-2-test + index_name: content-search-2-test name: my-connector-2 language: en is_native: true @@ -40,13 +40,13 @@ setup: - match: { count: 3 } # Alphabetical order by index_name for results - - match: { results.0.id: "connector-a" } - - match: { results.0.index_name: "search-1-test" } - - match: { results.0.language: "pl" } + - match: { results.0.id: "connector-b" } + - match: { results.0.index_name: "content-search-2-test" } + - match: { results.0.language: "en" } - - match: { results.1.id: "connector-b" } - - match: { results.1.index_name: "search-2-test" } - - match: { results.1.language: "en" } + - match: { results.1.id: "connector-a" } + - match: { results.1.index_name: "search-1-test" } + - match: { results.1.language: "pl" } - match: { results.2.id: "connector-c" } - match: { results.2.index_name: "search-3-test" } @@ -62,9 +62,9 
@@ setup: - match: { count: 3 } # Alphabetical order by index_name for results - - match: { results.0.id: "connector-b" } - - match: { results.0.index_name: "search-2-test" } - - match: { results.0.language: "en" } + - match: { results.0.id: "connector-a" } + - match: { results.0.index_name: "search-1-test" } + - match: { results.0.language: "pl" } - match: { results.1.id: "connector-c" } - match: { results.1.index_name: "search-3-test" } @@ -79,13 +79,13 @@ setup: - match: { count: 3 } # Alphabetical order by index_name for results - - match: { results.0.id: "connector-a" } - - match: { results.0.index_name: "search-1-test" } - - match: { results.0.language: "pl" } + - match: { results.0.id: "connector-b" } + - match: { results.0.index_name: "content-search-2-test" } + - match: { results.0.language: "en" } - - match: { results.1.id: "connector-b" } - - match: { results.1.index_name: "search-2-test" } - - match: { results.1.language: "en" } + - match: { results.1.id: "connector-a" } + - match: { results.1.index_name: "search-1-test" } + - match: { results.1.language: "pl" } --- "List Connector - empty": @@ -118,11 +118,11 @@ setup: - do: connector.list: - index_name: search-1-test,search-2-test + index_name: search-1-test,content-search-2-test - match: { count: 2 } - - match: { results.0.index_name: "search-1-test" } - - match: { results.1.index_name: "search-2-test" } + - match: { results.0.index_name: "content-search-2-test" } + - match: { results.1.index_name: "search-1-test" } --- @@ -147,8 +147,8 @@ setup: connector_name: my-connector-1,my-connector-2 - match: { count: 2 } - - match: { results.0.name: "my-connector-1" } - - match: { results.1.name: "my-connector-2" } + - match: { results.0.name: "my-connector-2" } + - match: { results.1.name: "my-connector-1" } --- @@ -156,10 +156,10 @@ setup: - do: connector.list: connector_name: my-connector-1,my-connector-2 - index_name: search-2-test + index_name: content-search-2-test - match: { count: 1 } - - match: { results.0.index_name: "search-2-test" } + - match: { results.0.index_name: "content-search-2-test" } - match: { results.0.name: "my-connector-2" } @@ -230,13 +230,13 @@ setup: - match: { count: 3 } # Alphabetical order by index_name for results - - match: { results.0.id: "connector-a" } - - match: { results.0.index_name: "search-1-test" } - - match: { results.0.language: "pl" } + - match: { results.0.id: "connector-b" } + - match: { results.0.index_name: "content-search-2-test" } + - match: { results.0.language: "en" } - - match: { results.1.id: "connector-b" } - - match: { results.1.index_name: "search-2-test" } - - match: { results.1.language: "en" } + - match: { results.1.id: "connector-a" } + - match: { results.1.index_name: "search-1-test" } + - match: { results.1.language: "pl" } - match: { results.2.id: "connector-c" } - match: { results.2.index_name: "search-3-test" } @@ -255,13 +255,13 @@ setup: - match: { count: 3 } # Alphabetical order by index_name for results - - match: { results.0.id: "connector-a" } - - match: { results.0.index_name: "search-1-test" } - - match: { results.0.language: "pl" } + - match: { results.0.id: "connector-b" } + - match: { results.0.index_name: "content-search-2-test" } + - match: { results.0.language: "en" } - - match: { results.1.id: "connector-b" } - - match: { results.1.index_name: "search-2-test" } - - match: { results.1.language: "en" } + - match: { results.1.id: "connector-a" } + - match: { results.1.index_name: "search-1-test" } + - match: { results.1.language: "pl" } - match: { 
results.2.id: "connector-c" } - match: { results.2.index_name: "search-3-test" } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java index 53debedafc3d8..829943d245149 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java @@ -49,8 +49,9 @@ public Attribute(Source source, String name, Nullability nullability, @Nullable this.nullability = nullability; } - public static String rawTemporaryName(String inner, String outer, String suffix) { - return SYNTHETIC_ATTRIBUTE_NAME_PREFIX + inner + "$" + outer + "$" + suffix; + public static String rawTemporaryName(String... parts) { + var name = String.join("$", parts); + return name.isEmpty() || name.startsWith(SYNTHETIC_ATTRIBUTE_NAME_PREFIX) ? name : SYNTHETIC_ATTRIBUTE_NAME_PREFIX + name; } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/TranslationAware.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/TranslationAware.java new file mode 100644 index 0000000000000..b1ac2b36314fa --- /dev/null +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/TranslationAware.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.core.expression; + +import org.elasticsearch.xpack.esql.core.planner.TranslatorHandler; +import org.elasticsearch.xpack.esql.core.querydsl.query.Query; + +/** + * Expressions can implement this interface to control how they would be translated and pushed down as Lucene queries. + * When an expression implements {@link TranslationAware}, we call {@link #asQuery(TranslatorHandler)} to get the + * {@link Query} translation, instead of relying on the registered translators from EsqlExpressionTranslators. + */ +public interface TranslationAware { + Query asQuery(TranslatorHandler translatorHandler); +} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java index e63cc1fcf25fe..32f7e181933b4 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java @@ -61,7 +61,7 @@ public static Expression combineAnd(List exps) { * * using the given combiner. * - * While a bit longer, this method creates a balanced tree as oppose to a plain + * While a bit longer, this method creates a balanced tree as opposed to a plain * recursive approach which creates an unbalanced one (either to the left or right). 
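+ * For example, a balanced combination of four predicates [a, b, c, d] with AND
+ * groups as ((a AND b) AND (c AND d)) rather than the left-deep
+ * (((a AND b) AND c) AND d), keeping the tree depth logarithmic in the number
+ * of predicates.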
*/ private static Expression combine(List exps, BiFunction combiner) { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/querydsl/query/TranslationAwareExpressionQuery.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/querydsl/query/TranslationAwareExpressionQuery.java new file mode 100644 index 0000000000000..92a42d3053b68 --- /dev/null +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/querydsl/query/TranslationAwareExpressionQuery.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.core.querydsl.query; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * Expressions that store their own {@link QueryBuilder} and implement + * {@link org.elasticsearch.xpack.esql.core.expression.TranslationAware} can use {@link TranslationAwareExpressionQuery} + * to wrap their {@link QueryBuilder}, instead of using the other existing {@link Query} implementations. + */ +public class TranslationAwareExpressionQuery extends Query { + private final QueryBuilder queryBuilder; + + public TranslationAwareExpressionQuery(Source source, QueryBuilder queryBuilder) { + super(source); + this.queryBuilder = queryBuilder; + } + + @Override + public QueryBuilder asBuilder() { + return queryBuilder; + } + + @Override + protected String innerToString() { + return queryBuilder.toString(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java index 9b53e6558f4db..191d6443264ca 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java @@ -180,13 +180,16 @@ public static BlockHash buildCategorizeBlockHash( List groups, AggregatorMode aggregatorMode, BlockFactory blockFactory, - AnalysisRegistry analysisRegistry + AnalysisRegistry analysisRegistry, + int emitBatchSize ) { - if (groups.size() != 1) { - throw new IllegalArgumentException("only a single CATEGORIZE group can used"); + if (groups.size() == 1) { + return new CategorizeBlockHash(blockFactory, groups.get(0).channel, aggregatorMode, analysisRegistry); + } else { + assert groups.get(0).isCategorize(); + assert groups.subList(1, groups.size()).stream().noneMatch(GroupSpec::isCategorize); + return new CategorizePackedValuesBlockHash(groups, blockFactory, aggregatorMode, analysisRegistry, emitBatchSize); } - - return new CategorizeBlockHash(blockFactory, groups.get(0).channel, aggregatorMode, analysisRegistry); } /** diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java index 35c6faf84e623..f83776fbdbc85 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java @@ -44,7 +44,7 @@ import java.util.Objects; /** - * Base BlockHash implementation for {@code Categorize} grouping function. + * BlockHash implementation for {@code Categorize} grouping function. */ public class CategorizeBlockHash extends BlockHash { @@ -53,11 +53,9 @@ public class CategorizeBlockHash extends BlockHash { ); private static final int NULL_ORD = 0; - // TODO: this should probably also take an emitBatchSize private final int channel; private final AggregatorMode aggregatorMode; private final TokenListCategorizer.CloseableTokenListCategorizer categorizer; - private final CategorizeEvaluator evaluator; /** @@ -95,12 +93,14 @@ public class CategorizeBlockHash extends BlockHash { } } + boolean seenNull() { + return seenNull; + } + @Override public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { - if (aggregatorMode.isInputPartial() == false) { - addInitial(page, addInput); - } else { - addIntermediate(page, addInput); + try (IntBlock block = add(page)) { + addInput.add(0, block); } } @@ -129,50 +129,38 @@ public void close() { Releasables.close(evaluator, categorizer); } + private IntBlock add(Page page) { + return aggregatorMode.isInputPartial() == false ? addInitial(page) : addIntermediate(page); + } + /** * Adds initial (raw) input to the state. */ - private void addInitial(Page page, GroupingAggregatorFunction.AddInput addInput) { - try (IntBlock result = (IntBlock) evaluator.eval(page.getBlock(channel))) { - addInput.add(0, result); - } + IntBlock addInitial(Page page) { + return (IntBlock) evaluator.eval(page.getBlock(channel)); } /** * Adds intermediate state to the state. */ - private void addIntermediate(Page page, GroupingAggregatorFunction.AddInput addInput) { + private IntBlock addIntermediate(Page page) { if (page.getPositionCount() == 0) { - return; + return null; } BytesRefBlock categorizerState = page.getBlock(channel); if (categorizerState.areAllValuesNull()) { seenNull = true; - try (var newIds = blockFactory.newConstantIntVector(NULL_ORD, 1)) { - addInput.add(0, newIds); - } - return; - } - - Map idMap = readIntermediate(categorizerState.getBytesRef(0, new BytesRef())); - try (IntBlock.Builder newIdsBuilder = blockFactory.newIntBlockBuilder(idMap.size())) { - int fromId = idMap.containsKey(0) ? 0 : 1; - int toId = fromId + idMap.size(); - for (int i = fromId; i < toId; i++) { - newIdsBuilder.appendInt(idMap.get(i)); - } - try (IntBlock newIds = newIdsBuilder.build()) { - addInput.add(0, newIds); - } + return blockFactory.newConstantIntBlockWith(NULL_ORD, 1); } + return recategorize(categorizerState.getBytesRef(0, new BytesRef()), null).asBlock(); } /** - * Read intermediate state from a block. - * - * @return a map from the old category id to the new one. The old ids go from 0 to {@code size - 1}. + * Reads the intermediate state from a block and recategorizes the provided IDs. + * If no IDs are provided, the IDs are the IDs in the categorizer's state in order. + * (So 0...N-1 or 1...N, depending on whether null is present.) 
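+ * For example, an intermediate state that serialized categories [X, Y] as IDs
+ * [1, 2] is merged into this categorizer, and those IDs are rewritten to
+ * whatever IDs X and Y already have locally (ordinal 0 stays reserved for null).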
*/ - private Map readIntermediate(BytesRef bytes) { + IntVector recategorize(BytesRef bytes, IntVector ids) { Map idMap = new HashMap<>(); try (StreamInput in = new BytesArray(bytes).streamInput()) { if (in.readBoolean()) { @@ -185,10 +173,22 @@ private Map readIntermediate(BytesRef bytes) { // +1 because the 0 ordinal is reserved for null idMap.put(oldCategoryId + 1, newCategoryId + 1); } - return idMap; } catch (IOException e) { throw new RuntimeException(e); } + try (IntVector.Builder newIdsBuilder = blockFactory.newIntVectorBuilder(idMap.size())) { + if (ids == null) { + int idOffset = idMap.containsKey(0) ? 0 : 1; + for (int i = 0; i < idMap.size(); i++) { + newIdsBuilder.appendInt(idMap.get(i + idOffset)); + } + } else { + for (int i = 0; i < ids.getPositionCount(); i++) { + newIdsBuilder.appendInt(idMap.get(ids.getInt(i))); + } + } + return newIdsBuilder.build(); + } } /** @@ -198,15 +198,20 @@ private Block buildIntermediateBlock() { if (categorizer.getCategoryCount() == 0) { return blockFactory.newConstantNullBlock(seenNull ? 1 : 0); } + int positionCount = categorizer.getCategoryCount() + (seenNull ? 1 : 0); + // We're returning a block with N positions just because the Page must have all blocks with the same position count! + return blockFactory.newConstantBytesRefBlockWith(serializeCategorizer(), positionCount); + } + + BytesRef serializeCategorizer() { + // TODO: This BytesStreamOutput is not accounted for by the circuit breaker. Fix that! try (BytesStreamOutput out = new BytesStreamOutput()) { out.writeBoolean(seenNull); out.writeVInt(categorizer.getCategoryCount()); for (SerializableTokenListCategory category : categorizer.toCategoriesById()) { category.writeTo(out); } - // We're returning a block with N positions just because the Page must have all blocks with the same position count! - int positionCount = categorizer.getCategoryCount() + (seenNull ? 1 : 0); - return blockFactory.newConstantBytesRefBlockWith(out.bytes().toBytesRef(), positionCount); + return out.bytes().toBytesRef(); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHash.java new file mode 100644 index 0000000000000..20874cb10ceb8 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHash.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation.blockhash; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.index.analysis.AnalysisRegistry; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * BlockHash implementation for {@code Categorize} grouping function as the first + * grouping expression, followed by one or more other grouping expressions. + *

+ * For the first grouping (the {@code Categorize} grouping function), a + * {@code CategorizeBlockHash} is used, which outputs integers (category IDs). + * Next, a {@code PackedValuesBlockHash} is used on the category IDs and the + * other groupings (which are not {@code Categorize}s). + */ +public class CategorizePackedValuesBlockHash extends BlockHash { + + private final List specs; + private final AggregatorMode aggregatorMode; + private final Block[] blocks; + private final CategorizeBlockHash categorizeBlockHash; + private final PackedValuesBlockHash packedValuesBlockHash; + + CategorizePackedValuesBlockHash( + List specs, + BlockFactory blockFactory, + AggregatorMode aggregatorMode, + AnalysisRegistry analysisRegistry, + int emitBatchSize + ) { + super(blockFactory); + this.specs = specs; + this.aggregatorMode = aggregatorMode; + blocks = new Block[specs.size()]; + + List delegateSpecs = new ArrayList<>(); + delegateSpecs.add(new GroupSpec(0, ElementType.INT)); + for (int i = 1; i < specs.size(); i++) { + delegateSpecs.add(new GroupSpec(i, specs.get(i).elementType())); + } + + boolean success = false; + try { + categorizeBlockHash = new CategorizeBlockHash(blockFactory, specs.get(0).channel(), aggregatorMode, analysisRegistry); + packedValuesBlockHash = new PackedValuesBlockHash(delegateSpecs, blockFactory, emitBatchSize); + success = true; + } finally { + if (success == false) { + close(); + } + } + } + + @Override + public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { + try (IntBlock categories = getCategories(page)) { + blocks[0] = categories; + for (int i = 1; i < specs.size(); i++) { + blocks[i] = page.getBlock(specs.get(i).channel()); + } + packedValuesBlockHash.add(new Page(blocks), addInput); + } + } + + private IntBlock getCategories(Page page) { + if (aggregatorMode.isInputPartial() == false) { + return categorizeBlockHash.addInitial(page); + } else { + BytesRefBlock stateBlock = page.getBlock(0); + BytesRef stateBytes = stateBlock.getBytesRef(0, new BytesRef()); + try (StreamInput in = new BytesArray(stateBytes).streamInput()) { + BytesRef categorizerState = in.readBytesRef(); + try (IntVector ids = IntVector.readFrom(blockFactory, in)) { + return categorizeBlockHash.recategorize(categorizerState, ids).asBlock(); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + @Override + public Block[] getKeys() { + Block[] keys = packedValuesBlockHash.getKeys(); + if (aggregatorMode.isOutputPartial() == false) { + // For final output, the keys are the category regexes. + try ( + BytesRefBlock regexes = (BytesRefBlock) categorizeBlockHash.getKeys()[0]; + BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(keys[0].getPositionCount()) + ) { + IntVector idsVector = (IntVector) keys[0].asVector(); + int idsOffset = categorizeBlockHash.seenNull() ? 0 : -1; + BytesRef scratch = new BytesRef(); + for (int i = 0; i < idsVector.getPositionCount(); i++) { + int id = idsVector.getInt(i); + if (id == 0) { + builder.appendNull(); + } else { + builder.appendBytesRef(regexes.getBytesRef(id + idsOffset, scratch)); + } + } + keys[0].close(); + keys[0] = builder.build(); + } + } else { + // For intermediate output, the keys are the delegate PackedValuesBlockHash's + // keys, with the category IDs replaced by the categorizer's internal state + // together with the list of category IDs. + BytesRef state; + // TODO: This BytesStreamOutput is not accounted for by the circuit breaker. Fix that! 
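+ // Layout written below: the categorizer state as a length-prefixed BytesRef,
+ // followed by the IntVector of per-position category IDs, so getCategories()
+ // can read the two back in the same order when this intermediate output is
+ // consumed by the next phase.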
+ try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeBytesRef(categorizeBlockHash.serializeCategorizer()); + ((IntVector) keys[0].asVector()).writeTo(out); + state = out.bytes().toBytesRef(); + } catch (IOException e) { + throw new RuntimeException(e); + } + keys[0].close(); + keys[0] = blockFactory.newConstantBytesRefBlockWith(state, keys[0].getPositionCount()); + } + return keys; + } + + @Override + public IntVector nonEmpty() { + return packedValuesBlockHash.nonEmpty(); + } + + @Override + public BitArray seenGroupIds(BigArrays bigArrays) { + return packedValuesBlockHash.seenGroupIds(bigArrays); + } + + @Override + public final ReleasableIterator lookup(Page page, ByteSizeValue targetBlockSize) { + throw new UnsupportedOperationException(); + } + + @Override + public void close() { + Releasables.close(categorizeBlockHash, packedValuesBlockHash); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java index 6f8386ec08de1..ccddfdf5cc74a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java @@ -51,7 +51,13 @@ public Operator get(DriverContext driverContext) { if (groups.stream().anyMatch(BlockHash.GroupSpec::isCategorize)) { return new HashAggregationOperator( aggregators, - () -> BlockHash.buildCategorizeBlockHash(groups, aggregatorMode, driverContext.blockFactory(), analysisRegistry), + () -> BlockHash.buildCategorizeBlockHash( + groups, + aggregatorMode, + driverContext.blockFactory(), + analysisRegistry, + maxPageSize + ), driverContext ); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java index f8428b7c33568..587deda650a23 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java @@ -130,9 +130,6 @@ public void close() { } finally { page.releaseBlocks(); } - - // TODO: randomize values? May give wrong results - // TODO: assert the categorizer state after adding pages. } public void testCategorizeRawMultivalue() { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHashTests.java new file mode 100644 index 0000000000000..cfa023af3d18a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizePackedValuesBlockHashTests.java @@ -0,0 +1,248 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation.blockhash; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.compute.aggregation.ValuesBytesRefAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.CannedSourceOperator; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.HashAggregationOperator; +import org.elasticsearch.compute.operator.LocalSourceOperator; +import org.elasticsearch.compute.operator.PageConsumerOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.scanners.StablePluginsRegistry; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.compute.operator.OperatorTestCase.runDriver; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class CategorizePackedValuesBlockHashTests extends BlockHashTestCase { + + private AnalysisRegistry analysisRegistry; + + @Before + private void initAnalysisRegistry() throws IOException { + analysisRegistry = new AnalysisModule( + TestEnvironment.newEnvironment( + Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build() + ), + List.of(new MachineLearning(Settings.EMPTY), new CommonAnalysisPlugin()), + new StablePluginsRegistry() + ).getAnalysisRegistry(); + } + + public void testCategorize_withDriver() { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofMb(256)).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + DriverContext driverContext = new DriverContext(bigArrays, new BlockFactory(breaker, bigArrays)); + boolean withNull = randomBoolean(); + boolean withMultivalues = randomBoolean(); + + List<BlockHash.GroupSpec> groupSpecs = List.of( + new BlockHash.GroupSpec(0, ElementType.BYTES_REF, true), + new BlockHash.GroupSpec(1, ElementType.INT, false) + ); + + LocalSourceOperator.BlockSupplier input1 = () -> { + try ( + BytesRefBlock.Builder messagesBuilder = driverContext.blockFactory().newBytesRefBlockBuilder(10); + IntBlock.Builder idsBuilder = driverContext.blockFactory().newIntBlockBuilder(10) + ) { + if (withMultivalues) { + messagesBuilder.beginPositionEntry(); + } + messagesBuilder.appendBytesRef(new BytesRef("connected to 1.1.1")); + messagesBuilder.appendBytesRef(new BytesRef("connected to 1.1.2")); + if (withMultivalues) { + messagesBuilder.endPositionEntry(); + } + idsBuilder.appendInt(7); + if (withMultivalues == false) { + idsBuilder.appendInt(7); + } + + messagesBuilder.appendBytesRef(new BytesRef("connected to 1.1.3")); + messagesBuilder.appendBytesRef(new BytesRef("connection error")); + messagesBuilder.appendBytesRef(new BytesRef("connection error")); + messagesBuilder.appendBytesRef(new BytesRef("connected to 1.1.4")); + idsBuilder.appendInt(42); + idsBuilder.appendInt(7); + idsBuilder.appendInt(42); + idsBuilder.appendInt(7); + + if (withNull) { + messagesBuilder.appendNull(); + idsBuilder.appendInt(43); + } + return new Block[] { messagesBuilder.build(), idsBuilder.build() }; + } + }; + LocalSourceOperator.BlockSupplier input2 = () -> { + try ( + BytesRefBlock.Builder messagesBuilder = driverContext.blockFactory().newBytesRefBlockBuilder(10); + IntBlock.Builder idsBuilder = driverContext.blockFactory().newIntBlockBuilder(10) + ) { + messagesBuilder.appendBytesRef(new BytesRef("connected to 2.1.1")); + messagesBuilder.appendBytesRef(new BytesRef("connected to 2.1.2")); + messagesBuilder.appendBytesRef(new BytesRef("disconnected")); + messagesBuilder.appendBytesRef(new BytesRef("connection error")); + idsBuilder.appendInt(111); + idsBuilder.appendInt(7); + idsBuilder.appendInt(7); + idsBuilder.appendInt(42); + if (withNull) { + messagesBuilder.appendNull(); + idsBuilder.appendNull(); + } + return new Block[] { messagesBuilder.build(), idsBuilder.build() }; + } + }; + + List<Page> intermediateOutput = new ArrayList<>(); + + Driver driver = new Driver( + driverContext, + new LocalSourceOperator(input1), + List.of( + new HashAggregationOperator.HashAggregationOperatorFactory( + groupSpecs, + AggregatorMode.INITIAL, + List.of(new ValuesBytesRefAggregatorFunctionSupplier(List.of(0)).groupingAggregatorFactory(AggregatorMode.INITIAL)), + 16 * 1024, + analysisRegistry + ).get(driverContext) + ), + new PageConsumerOperator(intermediateOutput::add), + () -> {} + ); + runDriver(driver); + + driver = new Driver( + driverContext, + new LocalSourceOperator(input2), + List.of( + new HashAggregationOperator.HashAggregationOperatorFactory( + groupSpecs, + AggregatorMode.INITIAL, + List.of(new ValuesBytesRefAggregatorFunctionSupplier(List.of(0)).groupingAggregatorFactory(AggregatorMode.INITIAL)), + 16 * 1024, + analysisRegistry + ).get(driverContext) + ), + new PageConsumerOperator(intermediateOutput::add), + () -> {} + ); + runDriver(driver); + + List<Page> finalOutput = new ArrayList<>(); + + driver = new Driver( + driverContext, + new CannedSourceOperator(intermediateOutput.iterator()), + List.of( + new HashAggregationOperator.HashAggregationOperatorFactory( + groupSpecs, + AggregatorMode.FINAL, + List.of(new ValuesBytesRefAggregatorFunctionSupplier(List.of(2)).groupingAggregatorFactory(AggregatorMode.FINAL)), + 16 * 1024, + analysisRegistry + ).get(driverContext) + ), + new PageConsumerOperator(finalOutput::add), + () -> {} + ); + runDriver(driver); + + assertThat(finalOutput, hasSize(1)); + assertThat(finalOutput.get(0).getBlockCount(), equalTo(3)); + BytesRefBlock outputMessages = finalOutput.get(0).getBlock(0); + IntBlock outputIds = finalOutput.get(0).getBlock(1); + BytesRefBlock outputValues = finalOutput.get(0).getBlock(2); + assertThat(outputIds.getPositionCount(), equalTo(outputMessages.getPositionCount())); + assertThat(outputValues.getPositionCount(), equalTo(outputMessages.getPositionCount())); + Map<String, Map<Integer, Set<String>>> result = new HashMap<>(); + for (int i = 0; i < outputMessages.getPositionCount(); i++) { + BytesRef messageBytesRef = ((BytesRef) BlockUtils.toJavaObject(outputMessages, i)); + String message = messageBytesRef == null ? null : messageBytesRef.utf8ToString(); + result.computeIfAbsent(message, key -> new HashMap<>()); + + Integer id = (Integer) BlockUtils.toJavaObject(outputIds, i); + result.get(message).computeIfAbsent(id, key -> new HashSet<>()); + + Object values = BlockUtils.toJavaObject(outputValues, i); + if (values == null) { + result.get(message).get(id).add(null); + } else { + if ((values instanceof List) == false) { + values = List.of(values); + } + for (Object valueObject : (List<?>) values) { + BytesRef value = (BytesRef) valueObject; + result.get(message).get(id).add(value.utf8ToString()); + } + } + } + Releasables.close(() -> Iterators.map(finalOutput.iterator(), (Page p) -> p::releaseBlocks)); + + Map<String, Map<Integer, Set<String>>> expectedResult = Map.of( + ".*?connected.+?to.*?", + Map.of( + 7, + Set.of("connected to 1.1.1", "connected to 1.1.2", "connected to 1.1.4", "connected to 2.1.2"), + 42, + Set.of("connected to 1.1.3"), + 111, + Set.of("connected to 2.1.1") + ), + ".*?connection.+?error.*?", + Map.of(7, Set.of("connection error"), 42, Set.of("connection error")), + ".*?disconnected.*?", + Map.of(7, Set.of("disconnected")) + ); + if (withNull) { + expectedResult = new HashMap<>(expectedResult); + expectedResult.put(null, new HashMap<>()); + expectedResult.get(null).put(null, new HashSet<>()); + expectedResult.get(null).get(null).add(null); + expectedResult.get(null).put(43, new HashSet<>()); + expectedResult.get(null).get(43).add(null); + } + assertThat(result, equalTo(expectedResult)); + } +} diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java index 81070b3155f2e..1120a69cc5166 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -21,7 +21,7 @@ import java.util.List; import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V4; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V5; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.ASYNC; public class MixedClusterEsqlSpecIT extends EsqlSpecTestCase { @@ -96,7 +96,7 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - return hasCapabilities(List.of(JOIN_LOOKUP_V4.capabilityName())); + return hasCapabilities(List.of(JOIN_LOOKUP_V5.capabilityName())); } @Override diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index 2ec75683ab149..5c7f981c93a97 100644 ---
a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -48,7 +48,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V4; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V5; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC; @@ -124,7 +124,7 @@ protected void shouldSkipTest(String testName) throws IOException { assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName())); - assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V4.capabilityName())); + assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V5.capabilityName())); } private TestFeatureService remoteFeaturesService() throws IOException { @@ -283,8 +283,8 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - // CCS does not yet support JOIN_LOOKUP_V4 and clusters falsely report they have this capability - // return hasCapabilities(List.of(JOIN_LOOKUP_V4.capabilityName())); + // CCS does not yet support JOIN_LOOKUP_V5 and clusters falsely report they have this capability + // return hasCapabilities(List.of(JOIN_LOOKUP_V5.capabilityName())); return false; } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec index 4ce43961a7077..5ad62dd7a21a8 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec @@ -60,6 +60,19 @@ COUNT():long | VALUES(str):keyword | category:keyword 1 | [a, b, c] | .*?disconnected.*? ; +limit before stats +required_capability: categorize_v5 + +FROM sample_data | SORT message | LIMIT 4 + | STATS count=COUNT() BY category=CATEGORIZE(message) + | SORT category +; + +count:long | category:keyword + 3 | .*?Connected.+?to.*? + 1 | .*?Connection.+?error.*? +; + skips stopwords required_capability: categorize_v5 @@ -615,3 +628,159 @@ COUNT():long | x:keyword 3 | [.*?Connection.+?error.*?,.*?Connection.+?error.*?] 1 | [.*?Disconnected.*?,.*?Disconnected.*?] ; + +multiple groupings with categorize and ip +required_capability: categorize_multiple_groupings + +FROM sample_data + | STATS count=COUNT() BY category=CATEGORIZE(message), client_ip + | SORT category, client_ip +; + +count:long | category:keyword | client_ip:ip + 1 | .*?Connected.+?to.*? | 172.21.2.113 + 1 | .*?Connected.+?to.*? 
| 172.21.2.162 + 1 | .*?Connected.+?to.*? | 172.21.3.15 + 3 | .*?Connection.+?error.*? | 172.21.3.15 + 1 | .*?Disconnected.*? | 172.21.0.5 +; + +multiple groupings with categorize and bucketed timestamp +required_capability: categorize_multiple_groupings + +FROM sample_data + | STATS count=COUNT() BY category=CATEGORIZE(message), timestamp=BUCKET(@timestamp, 1 HOUR) + | SORT category, timestamp +; + +count:long | category:keyword | timestamp:datetime + 2 | .*?Connected.+?to.*? | 2023-10-23T12:00:00.000Z + 1 | .*?Connected.+?to.*? | 2023-10-23T13:00:00.000Z + 3 | .*?Connection.+?error.*? | 2023-10-23T13:00:00.000Z + 1 | .*?Disconnected.*? | 2023-10-23T13:00:00.000Z +; + + +multiple groupings with categorize and limit before stats +required_capability: categorize_multiple_groupings + +FROM sample_data | SORT message | LIMIT 5 + | STATS count=COUNT() BY category=CATEGORIZE(message), client_ip + | SORT category, client_ip +; + +count:long | category:keyword | client_ip:ip + 1 | .*?Connected.+?to.*? | 172.21.2.113 + 1 | .*?Connected.+?to.*? | 172.21.2.162 + 1 | .*?Connected.+?to.*? | 172.21.3.15 + 2 | .*?Connection.+?error.*? | 172.21.3.15 +; + +multiple groupings with categorize and nulls +required_capability: categorize_multiple_groupings + +FROM employees + | STATS SUM(languages) BY category=CATEGORIZE(job_positions), gender + | SORT category DESC, gender ASC + | LIMIT 5 +; + +SUM(languages):long | category:keyword | gender:keyword + 11 | null | F + 16 | null | M + 14 | .*?Tech.+?Lead.*? | F + 23 | .*?Tech.+?Lead.*? | M + 9 | .*?Tech.+?Lead.*? | null +; + +multiple groupings with categorize and a field that's always null +required_capability: categorize_multiple_groupings + +FROM sample_data + | EVAL nullfield = null + | STATS count=COUNT() BY category=CATEGORIZE(nullfield), client_ip + | SORT client_ip +; + +count:long | category:keyword | client_ip:ip + 1 | null | 172.21.0.5 + 1 | null | 172.21.2.113 + 1 | null | 172.21.2.162 + 4 | null | 172.21.3.15 +; + +multiple groupings with categorize and the same text field +required_capability: categorize_multiple_groupings + +FROM sample_data + | STATS count=COUNT() BY category=CATEGORIZE(message), message + | SORT message +; + +count:long | category:keyword | message:keyword + 1 | .*?Connected.+?to.*? | Connected to 10.1.0.1 + 1 | .*?Connected.+?to.*? | Connected to 10.1.0.2 + 1 | .*?Connected.+?to.*? | Connected to 10.1.0.3 + 3 | .*?Connection.+?error.*? | Connection error + 1 | .*?Disconnected.*? | Disconnected +; + +multiple additional complex groupings with categorize +required_capability: categorize_multiple_groupings + +FROM sample_data + | STATS count=COUNT(), duration=SUM(event_duration) BY category=CATEGORIZE(message), SUBSTRING(message, 1, 7), ip_part=TO_LONG(SUBSTRING(TO_STRING(client_ip), 8, 1)), hour=BUCKET(@timestamp, 1 HOUR) + | SORT ip_part, category +; + +count:long | duration:long | category:keyword | SUBSTRING(message, 1, 7):keyword | ip_part:long | hour:datetime + 1 | 1232382 | .*?Disconnected.*? | Disconn | 0 | 2023-10-23T13:00:00.000Z + 2 | 6215122 | .*?Connected.+?to.*? | Connect | 2 | 2023-10-23T12:00:00.000Z + 1 | 1756467 | .*?Connected.+?to.*? | Connect | 3 | 2023-10-23T13:00:00.000Z + 3 | 14027356 | .*?Connection.+?error.*? 
| Connect | 3 | 2023-10-23T13:00:00.000Z +; + +multiple groupings with categorize and some constants including null +required_capability: categorize_multiple_groupings + +FROM sample_data + | STATS count=MV_COUNT(VALUES(message)) BY category=CATEGORIZE(message), null, constant="constant" + | SORT category +; + +count:integer | category:keyword | null:null | constant:keyword + 3 | .*?Connected.+?to.*? | null | constant + 1 | .*?Connection.+?error.*? | null | constant + 1 | .*?Disconnected.*? | null | constant +; + +multiple groupings with categorize and aggregation filters +required_capability: categorize_multiple_groupings + +FROM employees + | STATS lang_low=AVG(languages) WHERE salary<=50000, lang_high=AVG(languages) WHERE salary>50000 BY category=CATEGORIZE(job_positions), gender + | SORT category, gender + | LIMIT 5 +; + +lang_low:double | lang_high:double | category:keyword | gender:keyword + 2.0 | 5.0 | .*?Accountant.*? | F + 3.0 | 2.5 | .*?Accountant.*? | M + 5.0 | 2.0 | .*?Accountant.*? | null + 3.0 | 3.25 | .*?Architect.*? | F + 3.75 | null | .*?Architect.*? | M +; + +multiple groupings with categorize on null row +required_capability: categorize_multiple_groupings + +ROW message = null, str = ["a", "b", "c"] + | STATS COUNT(), VALUES(str) BY category=CATEGORIZE(message), str + | SORT str +; + +COUNT():long | VALUES(str):keyword | category:keyword | str:keyword + 1 | [a, b, c] | null | a + 1 | [a, b, c] | null | b + 1 | [a, b, c] | null | c +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec index 38f09d2e3c56e..cde5427bf37d6 100--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec @@ -223,7 +223,8 @@ null | null | null ; -overwriteName +// the query is incorrectly physically planned (fails the verification) in pre-8.13.0 versions +overwriteName#[skip:-8.12.99] from employees | sort emp_no asc | eval full_name = concat(first_name, " ", last_name) | dissect full_name "%{emp_no} %{b}" | keep full_name, emp_no, b | limit 3; full_name:keyword | emp_no:keyword | b:keyword @@ -244,7 +245,8 @@ emp_no:integer | first_name:keyword | rest:keyword ; -overwriteNameWhere +// the query is incorrectly physically planned (fails the verification) in pre-8.13.0 versions +overwriteNameWhere#[skip:-8.12.99] from employees | sort emp_no asc | eval full_name = concat(first_name, " ", last_name) | dissect full_name "%{emp_no} %{b}" | where emp_no == "Bezalel" | keep full_name, emp_no, b | limit 3; full_name:keyword | emp_no:keyword | b:keyword diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec index 98c88d06caa75..eece1bdfbffa4 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec @@ -199,7 +199,8 @@ null | null | null ; -overwriteName +// the query is incorrectly physically planned (fails the verification) in pre-8.13.0 versions +overwriteName#[skip:-8.12.99] from employees | sort emp_no asc | eval full_name = concat(first_name, " ", last_name) | grok full_name "%{WORD:emp_no} %{WORD:b}" | keep full_name, emp_no, b | limit 3; full_name:keyword | emp_no:keyword | b:keyword @@ -209,7 +210,8 @@ Parto Bamford | Parto | Bamford ; -overwriteNameWhere +// the query is incorrectly physically planned (fails the
verification) in pre-8.13.0 versions +overwriteNameWhere#[skip:-8.12.99] from employees | sort emp_no asc | eval full_name = concat(first_name, " ", last_name) | grok full_name "%{WORD:emp_no} %{WORD:b}" | where emp_no == "Bezalel" | keep full_name, emp_no, b | limit 3; full_name:keyword | emp_no:keyword | b:keyword diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index b01e12fa4f470..74b7a19d06bd6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -5,7 +5,7 @@ //TODO: this sometimes returns null instead of the looked up value (likely related to the execution order) basicOnTheDataNode -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM employees | EVAL language_code = languages @@ -22,7 +22,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; basicRow -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW language_code = 1 | LOOKUP JOIN languages_lookup ON language_code @@ -33,7 +33,7 @@ language_code:integer | language_name:keyword ; basicOnTheCoordinator -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM employees | SORT emp_no @@ -50,7 +50,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; subsequentEvalOnTheDataNode -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM employees | EVAL language_code = languages @@ -68,7 +68,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; subsequentEvalOnTheCoordinator -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM employees | SORT emp_no @@ -85,8 +85,25 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x 10003 | 4 | german | 8 ; +sortEvalBeforeLookup +required_capability: join_lookup_v5 + +FROM employees +| SORT emp_no +| EVAL language_code = (emp_no % 10) + 1 +| LOOKUP JOIN languages_lookup ON language_code +| KEEP emp_no, language_code, language_name +| LIMIT 3 +; + +emp_no:integer | language_code:integer | language_name:keyword +10001 | 2 | French +10002 | 3 | Spanish +10003 | 4 | German +; + lookupIPFromRow -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", client_ip = "172.21.0.5", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -97,7 +114,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowing -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -108,7 +125,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -121,7 +138,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeepReordered -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -134,7 +151,7 @@ right | Development | 172.21.0.5 ; lookupIPFromIndex -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM 
sample_data | EVAL client_ip = client_ip::keyword @@ -153,7 +170,7 @@ ignoreOrder:true ; lookupIPFromIndexKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -173,7 +190,7 @@ ignoreOrder:true ; lookupIPFromIndexStats -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -189,7 +206,7 @@ count:long | env:keyword ; lookupIPFromIndexStatsKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -206,7 +223,7 @@ count:long | env:keyword ; lookupMessageFromRow -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -217,7 +234,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowing -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -228,7 +245,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowingKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -240,7 +257,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromIndex -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -258,7 +275,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -277,7 +294,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepReordered -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -296,7 +313,7 @@ Success | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; lookupMessageFromIndexStats -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -311,7 +328,7 @@ count:long | type:keyword ; lookupMessageFromIndexStatsKeep -required_capability: join_lookup_v4 +required_capability: join_lookup_v5 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -325,3 +342,68 @@ count:long | type:keyword 3 | Success 1 | Disconnected ; + +// +// Filtering tests +// + +lookupWithFilterOnLeftSideField +required_capability: join_lookup_v5 + +FROM employees +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| SORT emp_no +| KEEP emp_no, language_code, language_name +| WHERE emp_no >= 10091 AND emp_no < 10094 +; + +emp_no:integer | language_code:integer | language_name:keyword +10091 | 3 | Spanish +10092 | 1 | English +10093 | 3 | Spanish +; + +lookupMessageWithFilterOnRightSideField-Ignore +required_capability: join_lookup_v5 + +FROM sample_data +| LOOKUP JOIN message_types_lookup ON message +| WHERE type == "Error" +| KEEP @timestamp, client_ip, event_duration, message, type +| SORT @timestamp DESC +; + +@timestamp:date | client_ip:ip | event_duration:long | message:keyword | type:keyword +2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | 
Connection error | Error +2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error | Error +2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error | Error +; + +lookupWithFieldAndRightSideAfterStats +required_capability: join_lookup_v5 + +FROM sample_data +| LOOKUP JOIN message_types_lookup ON message +| STATS count = count(message) BY type +| WHERE type == "Error" +; + +count:long | type:keyword +3 | Error +; + +lookupWithFieldOnJoinKey-Ignore +required_capability: join_lookup_v5 + +FROM employees +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| WHERE language_code > 1 AND language_name IS NOT NULL +| KEEP emp_no, language_code, language_name +; + +emp_no:integer | language_code:integer | language_name:keyword +10001 | 2 | French +10003 | 4 | German +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec index d4c7b8c59fdbc..cb38204a71ab0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec @@ -283,3 +283,33 @@ book_no:keyword | c_score:double 7350 | 2.0 7140 | 3.0 ; + +QstrScoreManipulation +required_capability: metadata_score +required_capability: qstr_function + +from books metadata _score +| where qstr("title:rings") +| eval _score = _score + 1 +| keep book_no, title, _score +| limit 2; + +book_no:keyword | title:text | _score:double +4023 | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | 2.6404519081115723 +2714 | Return of the King Being the Third Part of The Lord of the Rings | 2.9239964485168457 +; + +QstrScoreOverride +required_capability: metadata_score +required_capability: qstr_function + +from books metadata _score +| where qstr("title:rings") +| eval _score = "foobar" +| keep book_no, title, _score +| limit 2; + +book_no:keyword | title:text | _score:keyword +4023 | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | foobar +2714 | Return of the King Being the Third Part of The Lord of the Rings | foobar +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec index dd092130c3406..609aa20798edf 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec @@ -318,6 +318,47 @@ wkt:keyword |pt:geo_shape "POINT(111)" |null ; +############################################### +# Tests for GEO_SHAPE type with ST_ENVELOPE, ST_XMIN, etc. 
+# + +polygonEnvelope +required_capability: st_envelope + +// tag::st_envelope[] +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| KEEP abbrev, airport, envelope +// end::st_envelope[] +| LIMIT 1 +; + +// tag::st_envelope-result[] +abbrev:keyword | airport:text | envelope:geo_shape +CPH | Copenhagen | BBOX(12.453, 12.6398, 55.7327, 55.6318) +// end::st_envelope-result[] +; + +polygonEnvelopeXYMinMax +required_capability: st_envelope + +// tag::st_x_y_min_max[] +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope) +| KEEP abbrev, airport, xmin, xmax, ymin, ymax +// end::st_x_y_min_max[] +| LIMIT 1 +; + +// tag::st_x_y_min_max-result[] +abbrev:keyword | airport:text | xmin:double | xmax:double | ymin:double | ymax:double +CPH | Copenhagen | 12.453 | 12.6398 | 55.6318 | 55.7327 +// end::st_x_y_min_max-result[] +; + ############################################### # Tests for CARTESIAN_SHAPE type # diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 6e0a55655ee1c..add6f18887464 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -564,7 +564,8 @@ c:long | gender:keyword | trunk_worked_seconds:long 0 | null | 200000000 ; -byStringAndLongWithAlias +// the query is incorrectly physically planned (fails the verification) in pre-8.13.0 versions +byStringAndLongWithAlias#[skip:-8.12.99] FROM employees | EVAL trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | RENAME gender as g, trunk_worked_seconds as tws diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEnrichBasedCrossClusterTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEnrichBasedCrossClusterTestCase.java new file mode 100644 index 0000000000000..66ac32b33cd4d --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEnrichBasedCrossClusterTestCase.java @@ -0,0 +1,290 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.ingest.common.IngestCommonPlugin; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.action.TransportXPackInfoAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureResponse; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; +import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction; +import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; +import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; +import org.elasticsearch.xpack.enrich.EnrichPlugin; +import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; + +public abstract class AbstractEnrichBasedCrossClusterTestCase extends AbstractMultiClustersTestCase { + + public static String REMOTE_CLUSTER_1 = "c1"; + public static String REMOTE_CLUSTER_2 = "c2"; + + /** + * Subclasses should override if they don't want enrich policies wiped after each test method run. + */ + protected boolean tolerateErrorsWhenWipingEnrichPolicies() { + return false; + } + + @Override + protected List<String> remoteClusterAlias() { + return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); + } + + protected Collection<String> allClusters() { + return CollectionUtils.appendToCopy(remoteClusterAlias(), LOCAL_CLUSTER); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) { + List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(LocalStateEnrich.class); + plugins.add(IngestCommonPlugin.class); + plugins.add(ReindexPlugin.class); + return plugins; + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(super.nodeSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); + } + + static final EnrichPolicy hostPolicy = new EnrichPolicy("match", null, List.of("hosts"), "ip", List.of("ip", "os")); + static final EnrichPolicy vendorPolicy = new EnrichPolicy("match", null, List.of("vendors"), "os", List.of("os", "vendor")); + + @Before + public void setupHostsEnrich() { + // the hosts policy is identical on every cluster + Map<String, String> allHosts = Map.of( + "192.168.1.2", + "Windows", + "192.168.1.3", + "MacOS", + "192.168.1.4", + "Linux", + "192.168.1.5", + "Android", + "192.168.1.6", + "iOS", + "192.168.1.7", + "Windows", + "192.168.1.8", + "MacOS", + "192.168.1.9", + "Linux", + "192.168.1.10", + "Linux", + "192.168.1.11", + "Windows" + ); + for (String cluster : allClusters()) { + Client client = client(cluster); + client.admin().indices().prepareCreate("hosts").setMapping("ip", "type=ip", "os", "type=keyword").get(); + for (Map.Entry<String, String> h : allHosts.entrySet()) { + client.prepareIndex("hosts").setSource("ip", h.getKey(), "os", h.getValue()).get(); + } + client.admin().indices().prepareRefresh("hosts").get(); + client.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts", hostPolicy)) + .actionGet(); + client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts")) + .actionGet(); + assertAcked(client.admin().indices().prepareDelete("hosts")); + } + } + + @Before + public void setupVendorPolicy() { + var localVendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Samsung", "Linux", "Redhat"); + var c1Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Google", "Linux", "Suse"); + var c2Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Sony", "Linux", "Ubuntu"); + var vendors = Map.of(LOCAL_CLUSTER, localVendors, REMOTE_CLUSTER_1, c1Vendors, REMOTE_CLUSTER_2, c2Vendors); + for (Map.Entry<String, Map<String, String>> e : vendors.entrySet()) { + Client client = client(e.getKey()); + client.admin().indices().prepareCreate("vendors").setMapping("os", "type=keyword", "vendor", "type=keyword").get(); + for (Map.Entry<String, String> v : e.getValue().entrySet()) { + client.prepareIndex("vendors").setSource("os", v.getKey(), "vendor", v.getValue()).get(); + } + client.admin().indices().prepareRefresh("vendors").get(); + client.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors", vendorPolicy)) + .actionGet(); + client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors")) + .actionGet(); + assertAcked(client.admin().indices().prepareDelete("vendors")); + } + } + + @Before + public void setupEventsIndices() { + record Event(long timestamp, String user, String host) {} + List<Event> e0 = List.of( + new Event(1, "matthew", "192.168.1.3"), + new Event(2, "simon", "192.168.1.5"), + new Event(3, "park", "192.168.1.2"), + new Event(4, "andrew", "192.168.1.7"), + new Event(5, "simon", "192.168.1.20"), + new Event(6, "kevin", "192.168.1.2"), + new Event(7, "akio", "192.168.1.5"), + new Event(8, "luke", "192.168.1.2"), + new Event(9, "jack", "192.168.1.4") + ); + List<Event> e1 = List.of( + new Event(1, "andres", "192.168.1.2"), + new Event(2, "sergio", "192.168.1.6"), + new Event(3, "kylian", "192.168.1.8"), + new Event(4, "andrew", "192.168.1.9"), + new Event(5, "jack", "192.168.1.3"), + new Event(6, "kevin", "192.168.1.4"), + new Event(7, "akio", "192.168.1.7"), + new Event(8, "kevin", "192.168.1.21"), + new Event(9, "andres", "192.168.1.8") + ); + List<Event> e2 = List.of( + new Event(1, "park", "192.168.1.25"), + new Event(2, "akio", "192.168.1.5"), + new Event(3, "park", "192.168.1.2"), + new Event(4, "kevin", "192.168.1.3") + ); + for (var c : Map.of(LOCAL_CLUSTER, e0, REMOTE_CLUSTER_1, e1, REMOTE_CLUSTER_2, e2).entrySet()) { + Client client = client(c.getKey()); + client.admin() + .indices() + .prepareCreate("events") + .setMapping("timestamp", "type=long", "user", "type=keyword", "host", "type=ip") + .get(); + for (var e : c.getValue()) { + client.prepareIndex("events").setSource("timestamp", e.timestamp, "user", e.user, "host", e.host).get(); + } + client.admin().indices().prepareRefresh("events").get(); + } + } + + @After + public void wipeEnrichPolicies() { + for (String cluster : allClusters()) { + cluster(cluster).wipe(Set.of()); + for (String policy : List.of("hosts", "vendors")) { + if (tolerateErrorsWhenWipingEnrichPolicies()) { + try { + client(cluster).execute( + DeleteEnrichPolicyAction.INSTANCE, + new DeleteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policy) + ); + } catch (Exception e) { + assertThat(e.getMessage(), containsString("Cluster is already closed")); + } + + } else { + client(cluster).execute( + DeleteEnrichPolicyAction.INSTANCE, + new DeleteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policy) + ); + } + } + } + } + + static String enrichHosts(Enrich.Mode mode) { + return EsqlTestUtils.randomEnrichCommand("hosts", mode, hostPolicy.getMatchField(), hostPolicy.getEnrichFields()); + } + + static String enrichVendors(Enrich.Mode mode) { + return EsqlTestUtils.randomEnrichCommand("vendors", mode, vendorPolicy.getMatchField(), vendorPolicy.getEnrichFields()); + } + + protected EsqlQueryResponse runQuery(String query, Boolean ccsMetadataInResponse) { + EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + if (randomBoolean()) { + request.profile(true); + } + if (ccsMetadataInResponse != null) { + request.includeCCSMetadata(ccsMetadataInResponse); + } + return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); + } + + public static Tuple<Boolean, Boolean> randomIncludeCCSMetadata() { + return switch (randomIntBetween(1, 3)) { + case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE); + case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE); + case 3 -> new Tuple<>(null, Boolean.FALSE); + default -> throw new AssertionError("should not get here"); + }; + } + + public static class LocalStateEnrich extends LocalStateCompositeXPackPlugin { + public LocalStateEnrich(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + + plugins.add(new EnrichPlugin(settings) { + @Override + protected XPackLicenseState getLicenseState() { + // delegate to the composite plugin's license state; calling this.getLicenseState() here would recurse forever + return LocalStateEnrich.this.getLicenseState(); + } + }); + } + + public static class EnrichTransportXPackInfoAction extends TransportXPackInfoAction { + @Inject + public EnrichTransportXPackInfoAction( + TransportService transportService, + ActionFilters actionFilters, + LicenseService licenseService, + NodeClient client + ) { + super(transportService, actionFilters, licenseService, client); + } + + @Override + protected List<ActionType<XPackInfoFeatureResponse>> infoActions() { + return Collections.singletonList(XPackInfoFeatureAction.ENRICH); + } + } + + @Override + protected Class<? extends ActionType<XPackInfoResponse>> getInfoAction() { + return EnrichTransportXPackInfoAction.class; + } + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java index
440582dcfbb45..ea78ee2e3cfbd 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java @@ -35,7 +35,6 @@ import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.junit.Before; import java.io.IOException; @@ -78,7 +77,7 @@ protected Map skipUnavailableForRemoteClusters() { @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); plugins.add(EsqlAsyncActionIT.LocalStateEsqlAsync.class); // allows the async_search DELETE action plugins.add(InternalExchangePlugin.class); plugins.add(PauseFieldPlugin.class); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java index d142752d0c408..09ad97b08f357 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterEnrichUnavailableClustersIT.java @@ -8,36 +8,21 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Tuple; -import org.elasticsearch.ingest.common.IngestCommonPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.reindex.ReindexPlugin; -import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; -import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; -import org.junit.Before; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Set; -import java.util.concurrent.TimeUnit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; -import static org.elasticsearch.xpack.esql.action.CrossClustersEnrichIT.enrichHosts; -import static org.elasticsearch.xpack.esql.action.CrossClustersEnrichIT.enrichVendors; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -47,151 +32,26 @@ * This IT test is the dual of CrossClustersEnrichIT, which tests "happy path" * and this one tests unavailable cluster scenarios using (most of) the same tests. 
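* Shared fixtures (the hosts/vendors enrich policies, the events indices and the runQuery helper) are inherited from * AbstractEnrichBasedCrossClusterTestCase; only the plugin list, cluster reuse and the enrich-policy wiping tolerance differ here.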
*/ -public class CrossClusterEnrichUnavailableClustersIT extends AbstractMultiClustersTestCase { - - public static String REMOTE_CLUSTER_1 = "c1"; - public static String REMOTE_CLUSTER_2 = "c2"; - - @Override - protected Collection remoteClusterAlias() { - return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); - } +public class CrossClusterEnrichUnavailableClustersIT extends AbstractEnrichBasedCrossClusterTestCase { @Override protected boolean reuseClusters() { return false; } - private Collection allClusters() { - return CollectionUtils.appendToCopy(remoteClusterAlias(), LOCAL_CLUSTER); + @Override + protected boolean tolerateErrorsWhenWipingEnrichPolicies() { + // attempt to wipe will fail since some clusters are already closed + return true; } @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); - plugins.add(CrossClustersEnrichIT.LocalStateEnrich.class); - plugins.add(IngestCommonPlugin.class); - plugins.add(ReindexPlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); return plugins; } - @Override - protected Settings nodeSettings() { - return Settings.builder().put(super.nodeSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); - } - - @Before - public void setupHostsEnrich() { - // the hosts policy are identical on every node - Map allHosts = Map.of( - "192.168.1.2", - "Windows", - "192.168.1.3", - "MacOS", - "192.168.1.4", - "Linux", - "192.168.1.5", - "Android", - "192.168.1.6", - "iOS", - "192.168.1.7", - "Windows", - "192.168.1.8", - "MacOS", - "192.168.1.9", - "Linux", - "192.168.1.10", - "Linux", - "192.168.1.11", - "Windows" - ); - for (String cluster : allClusters()) { - Client client = client(cluster); - client.admin().indices().prepareCreate("hosts").setMapping("ip", "type=ip", "os", "type=keyword").get(); - for (Map.Entry h : allHosts.entrySet()) { - client.prepareIndex("hosts").setSource("ip", h.getKey(), "os", h.getValue()).get(); - } - client.admin().indices().prepareRefresh("hosts").get(); - client.execute( - PutEnrichPolicyAction.INSTANCE, - new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts", CrossClustersEnrichIT.hostPolicy) - ).actionGet(); - client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts")) - .actionGet(); - assertAcked(client.admin().indices().prepareDelete("hosts")); - } - } - - @Before - public void setupVendorPolicy() { - var localVendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Samsung", "Linux", "Redhat"); - var c1Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Google", "Linux", "Suse"); - var c2Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Sony", "Linux", "Ubuntu"); - var vendors = Map.of(LOCAL_CLUSTER, localVendors, "c1", c1Vendors, "c2", c2Vendors); - for (Map.Entry> e : vendors.entrySet()) { - Client client = client(e.getKey()); - client.admin().indices().prepareCreate("vendors").setMapping("os", "type=keyword", "vendor", "type=keyword").get(); - for (Map.Entry v : e.getValue().entrySet()) { - client.prepareIndex("vendors").setSource("os", v.getKey(), "vendor", v.getValue()).get(); - } - client.admin().indices().prepareRefresh("vendors").get(); - client.execute( - PutEnrichPolicyAction.INSTANCE, - new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors", CrossClustersEnrichIT.vendorPolicy) - 
).actionGet(); - client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors")) - .actionGet(); - assertAcked(client.admin().indices().prepareDelete("vendors")); - } - } - - @Before - public void setupEventsIndices() { - record Event(long timestamp, String user, String host) {} - - List e0 = List.of( - new Event(1, "matthew", "192.168.1.3"), - new Event(2, "simon", "192.168.1.5"), - new Event(3, "park", "192.168.1.2"), - new Event(4, "andrew", "192.168.1.7"), - new Event(5, "simon", "192.168.1.20"), - new Event(6, "kevin", "192.168.1.2"), - new Event(7, "akio", "192.168.1.5"), - new Event(8, "luke", "192.168.1.2"), - new Event(9, "jack", "192.168.1.4") - ); - List e1 = List.of( - new Event(1, "andres", "192.168.1.2"), - new Event(2, "sergio", "192.168.1.6"), - new Event(3, "kylian", "192.168.1.8"), - new Event(4, "andrew", "192.168.1.9"), - new Event(5, "jack", "192.168.1.3"), - new Event(6, "kevin", "192.168.1.4"), - new Event(7, "akio", "192.168.1.7"), - new Event(8, "kevin", "192.168.1.21"), - new Event(9, "andres", "192.168.1.8") - ); - List e2 = List.of( - new Event(1, "park", "192.168.1.25"), - new Event(2, "akio", "192.168.1.5"), - new Event(3, "park", "192.168.1.2"), - new Event(4, "kevin", "192.168.1.3") - ); - for (var c : Map.of(LOCAL_CLUSTER, e0, "c1", e1, "c2", e2).entrySet()) { - Client client = client(c.getKey()); - client.admin() - .indices() - .prepareCreate("events") - .setMapping("timestamp", "type=long", "user", "type=keyword", "host", "type=ip") - .get(); - for (var e : c.getValue()) { - client.prepareIndex("events").setSource("timestamp", e.timestamp, "user", e.user, "host", e.host).get(); - } - client.admin().indices().prepareRefresh("events").get(); - } - } - public void testEnrichWithHostsPolicyAndDisconnectedRemotesWithSkipUnavailableTrue() throws IOException { setSkipUnavailable(REMOTE_CLUSTER_1, true); setSkipUnavailable(REMOTE_CLUSTER_2, true); @@ -645,19 +505,6 @@ public void testEnrichRemoteWithVendor() throws IOException { } } - protected EsqlQueryResponse runQuery(String query, Boolean ccsMetadataInResponse) { - EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); - request.query(query); - request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); - if (randomBoolean()) { - request.profile(true); - } - if (ccsMetadataInResponse != null) { - request.includeCCSMetadata(ccsMetadataInResponse); - } - return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); - } - private static void assertCCSExecutionInfoDetails(EsqlExecutionInfo executionInfo) { assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); assertTrue(executionInfo.isCrossClusterSearch()); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java index 0f1aa8541fdd9..3607e080bae90 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterQueryUnavailableRemotesIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.xpack.esql.core.type.DataType; -import 
org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import java.io.IOException; import java.util.ArrayList; @@ -54,8 +53,8 @@ protected boolean reuseClusters() { @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); - plugins.add(org.elasticsearch.xpack.esql.action.CrossClustersQueryIT.InternalExchangePlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); + plugins.add(CrossClustersQueryIT.InternalExchangePlugin.class); return plugins; } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java index 0910e820c118a..68bfc60202365 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java @@ -33,7 +33,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.plugin.ComputeService; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.junit.Before; import java.util.ArrayList; @@ -62,7 +61,7 @@ protected Collection remoteClusterAlias() { @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); plugins.add(InternalExchangePlugin.class); plugins.add(PauseFieldPlugin.class); return plugins; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java index e8e9f45694e9c..4e6be6cc2bf74 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java @@ -7,218 +7,34 @@ package org.elasticsearch.xpack.esql.action; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.TransportAction; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Tuple; -import org.elasticsearch.ingest.common.IngestCommonPlugin; -import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.license.LicenseService; -import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.protocol.xpack.XPackInfoRequest; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.reindex.ReindexPlugin; -import org.elasticsearch.test.AbstractMultiClustersTestCase; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.action.TransportXPackInfoAction; -import 
org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; -import org.elasticsearch.xpack.core.action.XPackInfoFeatureResponse; -import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction; -import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; -import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; -import org.elasticsearch.xpack.enrich.EnrichPlugin; -import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; -import org.junit.After; -import org.junit.Before; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Set; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -public class CrossClustersEnrichIT extends AbstractMultiClustersTestCase { - - @Override - protected Collection remoteClusterAlias() { - return List.of("c1", "c2"); - } - - protected Collection allClusters() { - return CollectionUtils.appendToCopy(remoteClusterAlias(), LOCAL_CLUSTER); - } +public class CrossClustersEnrichIT extends AbstractEnrichBasedCrossClusterTestCase { @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); - plugins.add(LocalStateEnrich.class); - plugins.add(IngestCommonPlugin.class); - plugins.add(ReindexPlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); return plugins; } - @Override - protected Settings nodeSettings() { - return Settings.builder().put(super.nodeSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); - } - - static final EnrichPolicy hostPolicy = new EnrichPolicy("match", null, List.of("hosts"), "ip", List.of("ip", "os")); - static final EnrichPolicy vendorPolicy = new EnrichPolicy("match", null, List.of("vendors"), "os", List.of("os", "vendor")); - - @Before - public void setupHostsEnrich() { - // the hosts policy are identical on every node - Map allHosts = Map.of( - "192.168.1.2", - "Windows", - "192.168.1.3", - "MacOS", - "192.168.1.4", - "Linux", - "192.168.1.5", - "Android", - "192.168.1.6", - "iOS", - "192.168.1.7", - "Windows", - "192.168.1.8", - "MacOS", - "192.168.1.9", - "Linux", - "192.168.1.10", - "Linux", - "192.168.1.11", - "Windows" - ); - for (String cluster : allClusters()) { - Client client = client(cluster); - client.admin().indices().prepareCreate("hosts").setMapping("ip", "type=ip", "os", "type=keyword").get(); - for (Map.Entry h : allHosts.entrySet()) { - client.prepareIndex("hosts").setSource("ip", h.getKey(), "os", h.getValue()).get(); - } - client.admin().indices().prepareRefresh("hosts").get(); - client.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts", hostPolicy)) - .actionGet(); - client.execute(ExecuteEnrichPolicyAction.INSTANCE, new 
ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts")) - .actionGet(); - assertAcked(client.admin().indices().prepareDelete("hosts")); - } - } - - @Before - public void setupVendorPolicy() { - var localVendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Samsung", "Linux", "Redhat"); - var c1Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Google", "Linux", "Suse"); - var c2Vendors = Map.of("Windows", "Microsoft", "MacOS", "Apple", "iOS", "Apple", "Android", "Sony", "Linux", "Ubuntu"); - var vendors = Map.of(LOCAL_CLUSTER, localVendors, "c1", c1Vendors, "c2", c2Vendors); - for (Map.Entry> e : vendors.entrySet()) { - Client client = client(e.getKey()); - client.admin().indices().prepareCreate("vendors").setMapping("os", "type=keyword", "vendor", "type=keyword").get(); - for (Map.Entry v : e.getValue().entrySet()) { - client.prepareIndex("vendors").setSource("os", v.getKey(), "vendor", v.getValue()).get(); - } - client.admin().indices().prepareRefresh("vendors").get(); - client.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors", vendorPolicy)) - .actionGet(); - client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "vendors")) - .actionGet(); - assertAcked(client.admin().indices().prepareDelete("vendors")); - } - } - - @Before - public void setupEventsIndices() { - record Event(long timestamp, String user, String host) { - - } - List e0 = List.of( - new Event(1, "matthew", "192.168.1.3"), - new Event(2, "simon", "192.168.1.5"), - new Event(3, "park", "192.168.1.2"), - new Event(4, "andrew", "192.168.1.7"), - new Event(5, "simon", "192.168.1.20"), - new Event(6, "kevin", "192.168.1.2"), - new Event(7, "akio", "192.168.1.5"), - new Event(8, "luke", "192.168.1.2"), - new Event(9, "jack", "192.168.1.4") - ); - List e1 = List.of( - new Event(1, "andres", "192.168.1.2"), - new Event(2, "sergio", "192.168.1.6"), - new Event(3, "kylian", "192.168.1.8"), - new Event(4, "andrew", "192.168.1.9"), - new Event(5, "jack", "192.168.1.3"), - new Event(6, "kevin", "192.168.1.4"), - new Event(7, "akio", "192.168.1.7"), - new Event(8, "kevin", "192.168.1.21"), - new Event(9, "andres", "192.168.1.8") - ); - List e2 = List.of( - new Event(1, "park", "192.168.1.25"), - new Event(2, "akio", "192.168.1.5"), - new Event(3, "park", "192.168.1.2"), - new Event(4, "kevin", "192.168.1.3") - ); - for (var c : Map.of(LOCAL_CLUSTER, e0, "c1", e1, "c2", e2).entrySet()) { - Client client = client(c.getKey()); - client.admin() - .indices() - .prepareCreate("events") - .setMapping("timestamp", "type=long", "user", "type=keyword", "host", "type=ip") - .get(); - for (var e : c.getValue()) { - client.prepareIndex("events").setSource("timestamp", e.timestamp, "user", e.user, "host", e.host).get(); - } - client.admin().indices().prepareRefresh("events").get(); - } - } - - @After - public void wipeEnrichPolicies() { - for (String cluster : allClusters()) { - cluster(cluster).wipe(Set.of()); - for (String policy : List.of("hosts", "vendors")) { - client(cluster).execute( - DeleteEnrichPolicyAction.INSTANCE, - new DeleteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policy) - ); - } - } - } - - static String enrichHosts(Enrich.Mode mode) { - return EsqlTestUtils.randomEnrichCommand("hosts", mode, hostPolicy.getMatchField(), hostPolicy.getEnrichFields()); - } - - static String enrichVendors(Enrich.Mode mode) { - return 
EsqlTestUtils.randomEnrichCommand("vendors", mode, vendorPolicy.getMatchField(), vendorPolicy.getEnrichFields()); - } - public void testWithHostsPolicy() { for (var mode : Enrich.Mode.values()) { String query = "FROM events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; @@ -606,19 +422,6 @@ public void testEnrichCoordinatorThenEnrichRemote() { ); } - protected EsqlQueryResponse runQuery(String query, Boolean ccsMetadataInResponse) { - EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); - request.query(query); - request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); - if (randomBoolean()) { - request.profile(true); - } - if (ccsMetadataInResponse != null) { - request.includeCCSMetadata(ccsMetadataInResponse); - } - return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); - } - private static void assertCCSExecutionInfoDetails(EsqlExecutionInfo executionInfo) { assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); assertTrue(executionInfo.isCrossClusterSearch()); @@ -637,49 +440,4 @@ private static void assertCCSExecutionInfoDetails(EsqlExecutionInfo executionInf assertThat(cluster.getFailedShards(), equalTo(0)); } } - - public static Tuple randomIncludeCCSMetadata() { - return switch (randomIntBetween(1, 3)) { - case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE); - case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE); - case 3 -> new Tuple<>(null, Boolean.FALSE); - default -> throw new AssertionError("should not get here"); - }; - } - - public static class LocalStateEnrich extends LocalStateCompositeXPackPlugin { - - public LocalStateEnrich(final Settings settings, final Path configPath) throws Exception { - super(settings, configPath); - - plugins.add(new EnrichPlugin(settings) { - @Override - protected XPackLicenseState getLicenseState() { - return this.getLicenseState(); - } - }); - } - - public static class EnrichTransportXPackInfoAction extends TransportXPackInfoAction { - @Inject - public EnrichTransportXPackInfoAction( - TransportService transportService, - ActionFilters actionFilters, - LicenseService licenseService, - NodeClient client - ) { - super(transportService, actionFilters, licenseService, client); - } - - @Override - protected List> infoActions() { - return Collections.singletonList(XPackInfoFeatureAction.ENRICH); - } - } - - @Override - protected Class> getInfoAction() { - return EnrichTransportXPackInfoAction.class; - } - } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueriesWithInvalidLicenseIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueriesWithInvalidLicenseIT.java new file mode 100644 index 0000000000000..1ed42b696d65e --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueriesWithInvalidLicenseIT.java @@ -0,0 +1,203 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
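The per-cluster enrich bootstrap removed above now lives in the shared AbstractEnrichBasedCrossClusterTestCase. Condensed, the flow it performs for the hosts policy looks like the sketch below, using only the calls visible in the removed @Before methods (a test-scoped Client, TEST_REQUEST_TIMEOUT, and assertAcked are assumed to be in scope, as in the original code):

```java
// Sketch of the per-cluster enrich bootstrap: index the lookup data, register the
// policy, execute it to build the internal enrich index, then drop the source index.
EnrichPolicy hostPolicy = new EnrichPolicy("match", null, List.of("hosts"), "ip", List.of("ip", "os"));

client.admin().indices().prepareCreate("hosts").setMapping("ip", "type=ip", "os", "type=keyword").get();
client.prepareIndex("hosts").setSource("ip", "192.168.1.2", "os", "Windows").get();
client.admin().indices().prepareRefresh("hosts").get();

client.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts", hostPolicy))
    .actionGet();
client.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, "hosts"))
    .actionGet();

// Once executed, the policy is served from its own enrich index, so the source index
// is no longer needed.
assertAcked(client.admin().indices().prepareDelete("hosts"));
```

The tests then generate ENRICH commands against that policy via EsqlTestUtils.randomEnrichCommand, as the enrichHosts and enrichVendors helpers above did.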
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Set; + +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +public class CrossClustersQueriesWithInvalidLicenseIT extends AbstractEnrichBasedCrossClusterTestCase { + + private static final String LICENSE_ERROR_MESSAGE = "A valid Enterprise license is required to run ES|QL cross-cluster searches."; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) { + List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPluginWithNonEnterpriseOrExpiredLicense.class); // key plugin for the test + return plugins; + } + + public void testBasicCrossClusterQuery() { + Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + ElasticsearchStatusException e = expectThrows( + ElasticsearchStatusException.class, + () -> runQuery("FROM *,*:* | LIMIT 5", requestIncludeMeta) + ); + assertThat(e.getMessage(), containsString(LICENSE_ERROR_MESSAGE)); + } + + public void testMetadataCrossClusterQuery() { + Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + ElasticsearchStatusException e = expectThrows( + ElasticsearchStatusException.class, + () -> runQuery("FROM events,*:* METADATA _index | SORT _index", requestIncludeMeta) + ); + assertThat(e.getMessage(), containsString(LICENSE_ERROR_MESSAGE)); + } + + public void testQueryAgainstNonMatchingClusterWildcardPattern() { + Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + // since this wildcarded expression does not resolve to a valid remote cluster, it is not considered + // a cross-cluster search and thus should not throw a license error + String q = "FROM xremote*:events"; + { + String limit1 = q + " | STATS count(*)"; + try (EsqlQueryResponse resp = runQuery(limit1, requestIncludeMeta)) { + assertThat(resp.columns().size(), equalTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), equalTo(1)); + assertThat(getValuesList(resp).size(), equalTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + } + } + } + + public void testCCSWithLimit0() { + Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + + // a local-only query does not need a valid Enterprise or Trial license + try (EsqlQueryResponse resp = runQuery("FROM
events | LIMIT 0", requestIncludeMeta)) { + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertNotNull(executionInfo); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); + } + + // cross-cluster searches should fail with license error + String q = randomFrom("FROM events,c1:* | LIMIT 0", "FROM c1:* | LIMIT 0"); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getMessage(), containsString(LICENSE_ERROR_MESSAGE)); + } + + public void testSearchesWhereNonExistentClusterIsSpecified() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + // this one query should be allowed since x* does not resolve to any known remote cluster + try (EsqlQueryResponse resp = runQuery("FROM events,x*:no_such_index* | STATS count(*)", requestIncludeMeta)) { + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + List> values = getValuesList(resp); + assertThat(values, hasSize(1)); + + assertNotNull(executionInfo); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(LOCAL_CLUSTER))); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // since this not a CCS, only the overall took time in the EsqlExecutionInfo matters + assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); + } + + ElasticsearchStatusException e = expectThrows( + ElasticsearchStatusException.class, + () -> runQuery("FROM events,no_such_cluster:no_such_index* | STATS count(*)", requestIncludeMeta) + ); + // with a valid license this would throw "no such remote cluster" exception, but without a valid license, should get a license error + assertThat(e.getMessage(), containsString(LICENSE_ERROR_MESSAGE)); + } + + public void testEnrichWithHostsPolicy() { + // local-only queries do not need an Enterprise or Trial license + for (var mode : Enrich.Mode.values()) { + String query = "FROM events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; + try (EsqlQueryResponse resp = runQuery(query, null)) { + List> rows = getValuesList(resp); + assertThat( + rows, + equalTo( + List.of( + List.of(2L, "Android"), + List.of(1L, "Linux"), + List.of(1L, "MacOS"), + List.of(4L, "Windows"), + Arrays.asList(1L, (String) null) + ) + ) + ); + assertFalse(resp.getExecutionInfo().isCrossClusterSearch()); + } + } + + // cross-cluster query should fail due to not having valid Enterprise or Trial license + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + + for (var mode : Enrich.Mode.values()) { + String query = "FROM *:events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> runQuery(query, requestIncludeMeta)); + assertThat(e.getMessage(), containsString("A valid Enterprise license is required to run ES|QL cross-cluster searches.")); + } + + for (var mode : Enrich.Mode.values()) { + String query = "FROM *:events,events | eval ip= TO_STR(host) | " + enrichHosts(mode) + " | stats c = COUNT(*) by os | SORT os"; + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> 
runQuery(query, requestIncludeMeta)); + assertThat(e.getMessage(), containsString("A valid Enterprise license is required to run ES|QL cross-cluster searches.")); + } + } + + public void testAggThenEnrichRemote() { + String query = String.format(Locale.ROOT, """ + FROM *:events,events + | eval ip= TO_STR(host) + | %s + | stats c = COUNT(*) by os + | %s + | sort vendor + """, enrichHosts(Enrich.Mode.ANY), enrichVendors(Enrich.Mode.REMOTE)); + var error = expectThrows(ElasticsearchStatusException.class, () -> runQuery(query, randomBoolean()).close()); + // with a valid license this would fail with "ENRICH with remote policy can't be executed after STATS", so ensure here + // that the license error is detected first and returned rather than a VerificationException + assertThat(error.getMessage(), containsString(LICENSE_ERROR_MESSAGE)); + } + + public void testEnrichCoordinatorThenEnrichRemote() { + String query = String.format(Locale.ROOT, """ + FROM *:events,events + | eval ip= TO_STR(host) + | %s + | %s + | sort vendor + """, enrichHosts(Enrich.Mode.COORDINATOR), enrichVendors(Enrich.Mode.REMOTE)); + var error = expectThrows(ElasticsearchStatusException.class, () -> runQuery(query, randomBoolean()).close()); + assertThat( + error.getMessage(), + // with a valid license the error is "ENRICH with remote policy can't be executed after another ENRICH with coordinator policy", + // so ensure here that the license error is detected first and returned rather than a VerificationException + containsString(LICENSE_ERROR_MESSAGE) + ); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index 596c70e57ccd6..64cb7f9fe6dd0 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -32,7 +32,6 @@ import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import java.io.IOException; @@ -73,13 +72,13 @@ protected Collection remoteClusterAlias() { @Override protected Map skipUnavailableForRemoteClusters() { - return Map.of(REMOTE_CLUSTER_1, randomBoolean()); + return Map.of(REMOTE_CLUSTER_1, randomBoolean(), REMOTE_CLUSTER_2, randomBoolean()); } @Override protected Collection> nodePlugins(String clusterAlias) { List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); - plugins.add(EsqlPlugin.class); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); plugins.add(InternalExchangePlugin.class); return plugins; } @@ -184,7 +183,7 @@ public void testSuccessfulPathways() { } public void testSearchesAgainstNonMatchingIndicesWithLocalOnly() { - Map testClusterInfo = setupClusters(2); + Map testClusterInfo = setupTwoClusters(); String localIndex = (String) testClusterInfo.get("local.index"); { @@ -905,7 +904,7 @@ public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() { // cluster-foo* matches nothing and so should not be present in the EsqlExecutionInfo try ( EsqlQueryResponse resp = runQuery( - "from logs-*,no_such_index*,cluster-a:no_such_index*,cluster-foo*:* | stats sum (v)", + "FROM 
logs-*,no_such_index*,cluster-a:no_such_index*,cluster-foo*:* | STATS sum (v)", requestIncludeMeta ) ) { @@ -1009,7 +1008,7 @@ public void testMetadataIndex() { try ( EsqlQueryResponse resp = runQuery( - "FROM logs*,*:logs* METADATA _index | stats sum(v) by _index | sort _index", + Strings.format("FROM logs*,%s:logs* METADATA _index | stats sum(v) by _index | sort _index", REMOTE_CLUSTER_1), requestIncludeMeta ) ) { @@ -1091,7 +1090,7 @@ public void testProfile() { final int remoteOnlyProfiles; { EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); - request.query("FROM *:logs* | stats sum(v)"); + request.query("FROM c*:logs* | stats sum(v)"); request.pragmas(pragmas); request.profile(true); try (EsqlQueryResponse resp = runQuery(request)) { @@ -1124,7 +1123,7 @@ final int allProfiles; { EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); - request.query("FROM logs*,*:logs* | stats total = sum(v)"); + request.query("FROM logs*,c*:logs* | stats total = sum(v)"); request.pragmas(pragmas); request.profile(true); try (EsqlQueryResponse resp = runQuery(request)) { @@ -1169,7 +1168,7 @@ public void testWarnings() throws Exception { int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); - request.query("FROM logs*,*:logs* | EVAL ip = to_ip(id) | STATS total = sum(v) by ip | LIMIT 10"); + request.query("FROM logs*,c*:logs* | EVAL ip = to_ip(id) | STATS total = sum(v) by ip | LIMIT 10"); InternalTestCluster cluster = cluster(LOCAL_CLUSTER); String node = randomFrom(cluster.getNodeNames()); CountDownLatch latch = new CountDownLatch(1); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithEnterpriseOrTrialLicense.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithEnterpriseOrTrialLicense.java new file mode 100644 index 0000000000000..34d09fc541572 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithEnterpriseOrTrialLicense.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.license.License; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.internal.XPackLicenseStatus; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; + +import static org.elasticsearch.test.ESTestCase.randomFrom; + +/** + * In IT tests, use this instead of the EsqlPlugin in order to use ES|QL features + * that require an Enterprise (or Trial) license.
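One pattern worth noting in the CrossClustersQueryIT hunks above: bare `*:logs*` patterns are replaced with cluster-qualified ones (`c*:logs*`, or `Strings.format` with `REMOTE_CLUSTER_1`), presumably so the queries only reach the remote aliases this suite registers and the profile and warning counts stay deterministic. A minimal sketch of the profiled-request shape those tests use, assuming `pragmas` and `runQuery` from the surrounding test class:

```java
// Synchronous ES|QL request that profiles a query spanning the local logs* indices
// plus only the c*-aliased remotes, as in the updated testProfile().
EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest();
request.query("FROM logs*,c*:logs* | stats total = sum(v)");
request.pragmas(pragmas); // test-supplied QueryPragmas
request.profile(true);
try (EsqlQueryResponse resp = runQuery(request)) {
    // assertions on resp.profile() and resp.getExecutionInfo() go here
}
```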
+ */ +public class EsqlPluginWithEnterpriseOrTrialLicense extends EsqlPlugin { + protected XPackLicenseState getLicenseState() { + License.OperationMode operationMode = randomFrom(License.OperationMode.ENTERPRISE, License.OperationMode.TRIAL); + return new XPackLicenseState(() -> System.currentTimeMillis(), new XPackLicenseStatus(operationMode, true, "Test license expired")); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithNonEnterpriseOrExpiredLicense.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithNonEnterpriseOrExpiredLicense.java new file mode 100644 index 0000000000000..46c3f3f6204cd --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlPluginWithNonEnterpriseOrExpiredLicense.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.license.License; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.internal.XPackLicenseStatus; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; + +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.ESTestCase.randomFrom; + +/** + * In IT tests, use this instead of the EsqlPlugin in order to test ES|QL features + * using either: + * - an active (non-expired) basic, standard, missing, gold or platinum Elasticsearch license, OR + * - an expired enterprise or trial license + */ +public class EsqlPluginWithNonEnterpriseOrExpiredLicense extends EsqlPlugin { + protected XPackLicenseState getLicenseState() { + License.OperationMode operationMode; + boolean active; + if (randomBoolean()) { + operationMode = randomFrom( + License.OperationMode.PLATINUM, + License.OperationMode.GOLD, + License.OperationMode.BASIC, + License.OperationMode.MISSING, + License.OperationMode.STANDARD + ); + active = true; + } else { + operationMode = randomFrom(License.OperationMode.ENTERPRISE, License.OperationMode.TRIAL); + active = false; // expired + } + + return new XPackLicenseState( + () -> System.currentTimeMillis(), + new XPackLicenseStatus(operationMode, active, "Test license expired") + ); + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelopeFromWKBEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelopeFromWKBEvaluator.java new file mode 100644 index 0000000000000..3d6dc7277080a --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelopeFromWKBEvaluator.java @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0.
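Both plugin classes above pin the license that the ES|QL cross-cluster license check observes purely through getLicenseState(), so other tiers can be simulated the same way. A hypothetical sketch for completeness (the class name and the BASIC tier here are illustrative, not part of this change):

```java
// Hypothetical test plugin reporting an active BASIC license, mirroring the structure
// of the two plugins above; wire it in via nodePlugins(...) exactly as the tests do.
public class EsqlPluginWithBasicLicense extends EsqlPlugin {
    protected XPackLicenseState getLicenseState() {
        return new XPackLicenseState(
            () -> System.currentTimeMillis(),
            new XPackLicenseStatus(License.OperationMode.BASIC, true, "") // active, so no expiry message
        );
    }
}
```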
+package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StEnvelope}. + * This class is generated. Do not edit it. + */ +public final class StEnvelopeFromWKBEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StEnvelopeFromWKBEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StEnvelopeFromWKB"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendBytesRef(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StEnvelope.fromWellKnownBinary(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StEnvelope.fromWellKnownBinary(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source 
source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StEnvelopeFromWKBEvaluator get(DriverContext context) { + return new StEnvelopeFromWKBEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StEnvelopeFromWKBEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelopeFromWKBGeoEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelopeFromWKBGeoEvaluator.java new file mode 100644 index 0000000000000..c61e825c0ee71 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelopeFromWKBGeoEvaluator.java @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StEnvelope}. + * This class is generated. Do not edit it. 
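The generated evaluators that follow all share one template: the StEnvelope pair produces BytesRef envelopes, the StXMin/StXMax/StYMin/StYMax families produce doubles, and each class differs only in its static evalValue() hook (fromWellKnownBinary for the planar variant, fromWellKnownBinaryGeo for the Geo variant). Condensed for illustration, the multi-valued evalBlock() path works as in the sketch below (names simplified; the real classes are code-generated, as their headers say):

```java
// Per-position decode loop: group multi-valued positions between beginPositionEntry()
// and endPositionEntry(), and emit null when no value of a position survives decoding
// (the generated classes also call registerException(e) to surface a warning header).
static Block evalBlockSketch(BytesRefBlock block, DriverContext driverContext) {
    int positionCount = block.getPositionCount();
    try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
        BytesRef scratch = new BytesRef();
        for (int p = 0; p < positionCount; p++) {
            int start = block.getFirstValueIndex(p);
            int end = start + block.getValueCount(p);
            boolean positionOpened = false;
            boolean valuesAppended = false;
            for (int i = start; i < end; i++) {
                try {
                    double value = StXMax.fromWellKnownBinary(block.getBytesRef(i, scratch));
                    if (positionOpened == false && end - start > 1) {
                        builder.beginPositionEntry();
                        positionOpened = true;
                    }
                    builder.appendDouble(value);
                    valuesAppended = true;
                } catch (IllegalArgumentException e) {
                    // invalid WKB input: skip this value
                }
            }
            if (valuesAppended == false) {
                builder.appendNull();
            } else if (positionOpened) {
                builder.endPositionEntry();
            }
        }
        return builder.build();
    }
}
```

The evalVector() counterpart adds one fast path: when the input vector is constant, the value is evaluated once and broadcast with newConstantDoubleBlockWith (or newConstantBytesRefBlockWith for the envelope variant), falling back to a constant-null block if decoding fails.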
+ */ +public final class StEnvelopeFromWKBGeoEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StEnvelopeFromWKBGeoEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StEnvelopeFromWKBGeo"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendBytesRef(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StEnvelope.fromWellKnownBinaryGeo(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StEnvelope.fromWellKnownBinaryGeo(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StEnvelopeFromWKBGeoEvaluator get(DriverContext context) { + return new StEnvelopeFromWKBGeoEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StEnvelopeFromWKBGeoEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMaxFromWKBEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMaxFromWKBEvaluator.java new file mode 100644 index 
0000000000000..0d51ef709c217 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMaxFromWKBEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StXMax}. + * This class is generated. Do not edit it. + */ +public final class StXMaxFromWKBEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StXMaxFromWKBEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StXMaxFromWKB"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StXMax.fromWellKnownBinary(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if 
(valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StXMax.fromWellKnownBinary(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StXMaxFromWKBEvaluator get(DriverContext context) { + return new StXMaxFromWKBEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StXMaxFromWKBEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMaxFromWKBGeoEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMaxFromWKBGeoEvaluator.java new file mode 100644 index 0000000000000..3707bf421d550 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMaxFromWKBGeoEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StXMax}. + * This class is generated. Do not edit it. 
+ */ +public final class StXMaxFromWKBGeoEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StXMaxFromWKBGeoEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StXMaxFromWKBGeo"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StXMax.fromWellKnownBinaryGeo(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StXMax.fromWellKnownBinaryGeo(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StXMaxFromWKBGeoEvaluator get(DriverContext context) { + return new StXMaxFromWKBGeoEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StXMaxFromWKBGeoEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMinFromWKBEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMinFromWKBEvaluator.java new file mode 100644 index 0000000000000..699402ad68dee --- /dev/null +++ 
b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMinFromWKBEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StXMin}. + * This class is generated. Do not edit it. + */ +public final class StXMinFromWKBEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StXMinFromWKBEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StXMinFromWKB"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StXMin.fromWellKnownBinary(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else 
if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StXMin.fromWellKnownBinary(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StXMinFromWKBEvaluator get(DriverContext context) { + return new StXMinFromWKBEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StXMinFromWKBEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMinFromWKBGeoEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMinFromWKBGeoEvaluator.java new file mode 100644 index 0000000000000..6a8c041595c1c --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMinFromWKBGeoEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StXMin}. + * This class is generated. Do not edit it. 
+ */ +public final class StXMinFromWKBGeoEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StXMinFromWKBGeoEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StXMinFromWKBGeo"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StXMin.fromWellKnownBinaryGeo(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StXMin.fromWellKnownBinaryGeo(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StXMinFromWKBGeoEvaluator get(DriverContext context) { + return new StXMinFromWKBGeoEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StXMinFromWKBGeoEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxFromWKBEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxFromWKBEvaluator.java new file mode 100644 index 0000000000000..e8b50099f38f6 --- /dev/null +++ 
b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxFromWKBEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StYMax}. + * This class is generated. Do not edit it. + */ +public final class StYMaxFromWKBEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StYMaxFromWKBEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StYMaxFromWKB"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StYMax.fromWellKnownBinary(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else 
if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StYMax.fromWellKnownBinary(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StYMaxFromWKBEvaluator get(DriverContext context) { + return new StYMaxFromWKBEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StYMaxFromWKBEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxFromWKBGeoEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxFromWKBGeoEvaluator.java new file mode 100644 index 0000000000000..00e75f862a86c --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxFromWKBGeoEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StYMax}. + * This class is generated. Do not edit it. 
+ */ +public final class StYMaxFromWKBGeoEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StYMaxFromWKBGeoEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StYMaxFromWKBGeo"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StYMax.fromWellKnownBinaryGeo(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StYMax.fromWellKnownBinaryGeo(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StYMaxFromWKBGeoEvaluator get(DriverContext context) { + return new StYMaxFromWKBGeoEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StYMaxFromWKBGeoEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMinFromWKBEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMinFromWKBEvaluator.java new file mode 100644 index 0000000000000..cab66683261aa --- /dev/null +++ 
b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMinFromWKBEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StYMin}. + * This class is generated. Do not edit it. + */ +public final class StYMinFromWKBEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StYMinFromWKBEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StYMinFromWKB"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StYMin.fromWellKnownBinary(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else 
if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StYMin.fromWellKnownBinary(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StYMinFromWKBEvaluator get(DriverContext context) { + return new StYMinFromWKBEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StYMinFromWKBEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMinFromWKBGeoEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMinFromWKBGeoEvaluator.java new file mode 100644 index 0000000000000..8bae9d369fbb4 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMinFromWKBGeoEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StYMin}. + * This class is generated. Do not edit it. 
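StYMinFromWKBEvaluator above delegates each value to StYMin.fromWellKnownBinary. Judging from the StYMax helper earlier in this diff, which takes getMaxY() of the envelope, the min variant presumably picks getMinY(); the sketch below restates that conversion with the same geometry utilities, as an illustration rather than the production helper.

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.geometry.Geometry;
import org.elasticsearch.geometry.Point;
import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor;

import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED;

// Sketch of the per-value conversion the StYMin evaluators delegate to:
// decode the WKB, short-circuit points, otherwise take one side of the
// envelope. Mirrors StYMax.fromWellKnownBinary with getMinY() swapped in.
final class YMinConversionSketch {
    static double yMinFromWkb(BytesRef wkb) {
        Geometry geometry = UNSPECIFIED.wkbToGeometry(wkb);
        if (geometry instanceof Point point) {
            return point.getY();
        }
        return SpatialEnvelopeVisitor.visitCartesian(geometry)
            .map(envelope -> envelope.getMinY())
            .orElseThrow(() -> new IllegalArgumentException("Cannot determine envelope of geometry"));
    }
}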
+ */ +public final class StYMinFromWKBGeoEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public StYMinFromWKBGeoEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "StYMinFromWKBGeo"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendDouble(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StYMin.fromWellKnownBinaryGeo(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + double value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendDouble(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return StYMin.fromWellKnownBinaryGeo(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public StYMinFromWKBGeoEvaluator get(DriverContext context) { + return new StYMinFromWKBGeoEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "StYMinFromWKBGeoEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 8619c0461ac35..6853747171048 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -207,6 +207,11 @@ public enum Cap { */ SPATIAL_CENTROID_NO_RECORDS, + /** + * Support ST_ENVELOPE function (and related ST_XMIN, etc.). + */ + ST_ENVELOPE, + /** * Fix to GROK and DISSECT that allows extracting attributes with the same name as the input * https://github.com/elastic/elasticsearch/issues/110184 @@ -407,6 +412,10 @@ public enum Cap { */ CATEGORIZE_V5, + /** + * Support for multiple groupings in "CATEGORIZE". + */ + CATEGORIZE_MULTIPLE_GROUPINGS, /** * QSTR function */ @@ -523,7 +532,7 @@ public enum Cap { /** * LOOKUP JOIN */ - JOIN_LOOKUP_V4(Build.current().isSnapshot()), + JOIN_LOOKUP_V5(Build.current().isSnapshot()), /** * Fix for https://github.com/elastic/elasticsearch/issues/117054 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index ecfe1aa7f9169..f01cc265e330b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; -import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.expression.function.Function; @@ -208,7 +207,6 @@ else if (p instanceof Lookup lookup) { checkJoin(p, failures); }); checkRemoteEnrich(plan, failures); - checkMetadataScoreNameReserved(plan, failures); if (failures.isEmpty()) { checkLicense(plan, licenseState, failures); @@ -222,13 +220,6 @@ else if (p instanceof Lookup lookup) { return failures; } - private static void checkMetadataScoreNameReserved(LogicalPlan p, Set failures) { - // _score can only be set as metadata attribute - if (p.inputSet().stream().anyMatch(a -> MetadataAttribute.SCORE.equals(a.name()) && (a instanceof MetadataAttribute) == false)) { - failures.add(fail(p, "`" + MetadataAttribute.SCORE + "` is a reserved METADATA attribute")); - } - } - private void checkSort(LogicalPlan p, Set failures) { if (p instanceof OrderBy ob) { ob.order().forEach(o -> { @@ -325,11 +316,15 @@ private static void checkAggregate(LogicalPlan p, Set failures) { private static void checkCategorizeGrouping(Aggregate agg, Set failures) { // Forbid CATEGORIZE grouping function with other groupings if (agg.groupings().size() > 1) { - agg.groupings().forEach(g -> { + agg.groupings().subList(1, agg.groupings().size()).forEach(g -> { g.forEachDown( Categorize.class, categorize -> failures.add( - fail(categorize, "cannot use CATEGORIZE grouping function [{}] with multiple groupings", categorize.sourceText()) + fail( + categorize, + "CATEGORIZE grouping function [{}] can only be in the first grouping expression", + categorize.sourceText() + ) ) ); }); @@ -382,6 +377,18 @@ private static void checkCategorizeGrouping(Aggregate agg, Set failures ); } }))); + agg.aggregates().forEach(a -> a.forEachDown(FilteredExpression.class, fe -> fe.filter().forEachDown(Attribute.class, attribute -> { + var categorize = categorizeByAttribute.get(attribute); + if (categorize != null) { + failures.add( + fail( + attribute, 
+ "cannot reference CATEGORIZE grouping function [{}] within an aggregation filter", + attribute.sourceText() + ) + ); + } + }))); } private static void checkRateAggregates(Expression expr, int nestedLevel, Set failures) { @@ -421,7 +428,8 @@ private static void checkInvalidNamedExpressionUsage( Expression filter = fe.filter(); failures.add(fail(filter, "WHERE clause allowed only for aggregate functions, none found in [{}]", fe.sourceText())); } - Expression f = fe.filter(); // check the filter has to be a boolean term, similar as checkFilterConditionType + Expression f = fe.filter(); + // check the filter has to be a boolean term, similar as checkFilterConditionType if (f.dataType() != NULL && f.dataType() != BOOLEAN) { failures.add(fail(f, "Condition expression needs to be boolean, found [{}]", f.dataType())); } @@ -432,9 +440,10 @@ private static void checkInvalidNamedExpressionUsage( fail(af, "cannot use aggregate function [{}] in aggregate WHERE clause [{}]", af.sourceText(), fe.sourceText()) ); } - // check the bucketing function against the group + // check the grouping function against the group else if (c instanceof GroupingFunction gf) { - if (Expressions.anyMatch(groups, ex -> ex instanceof Alias a && a.child().semanticEquals(gf)) == false) { + if (c instanceof Categorize + || Expressions.anyMatch(groups, ex -> ex instanceof Alias a && a.child().semanticEquals(gf)) == false) { failures.add(fail(gf, "can only use grouping function [{}] as part of the BY clause", gf.sourceText())); } } @@ -596,6 +605,10 @@ private void gatherMetrics(LogicalPlan plan, BitSet b) { functions.forEach(f -> metrics.incFunctionMetric(f)); } + public XPackLicenseState licenseState() { + return licenseState; + } + /** * Limit QL's comparisons to types we support. This should agree with * {@link EsqlBinaryComparison}'s checkCompatibility method diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/ExpressionWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/ExpressionWritables.java index 7e2de0094c2ab..febeccdad9d78 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/ExpressionWritables.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/ExpressionWritables.java @@ -57,8 +57,13 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialWithin; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StDistance; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StEnvelope; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StXMax; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StXMin; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StYMax; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StYMin; import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; @@ -166,6 +171,11 @@ public static List unaryScalars() { entries.add(Sinh.ENTRY); entries.add(Space.ENTRY); entries.add(Sqrt.ENTRY); + 
entries.add(StEnvelope.ENTRY); + entries.add(StXMax.ENTRY); + entries.add(StXMin.ENTRY); + entries.add(StYMax.ENTRY); + entries.add(StYMin.ENTRY); entries.add(StX.ENTRY); entries.add(StY.ENTRY); entries.add(Tan.ENTRY); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 1f0d7a2c3c044..e715bda60532a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -118,8 +118,13 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialWithin; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StDistance; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StEnvelope; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StXMax; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StXMin; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StYMax; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StYMin; import org.elasticsearch.xpack.esql.expression.function.scalar.string.BitLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; @@ -352,6 +357,11 @@ private static FunctionDefinition[][] functions() { def(SpatialIntersects.class, SpatialIntersects::new, "st_intersects"), def(SpatialWithin.class, SpatialWithin::new, "st_within"), def(StDistance.class, StDistance::new, "st_distance"), + def(StEnvelope.class, StEnvelope::new, "st_envelope"), + def(StXMax.class, StXMax::new, "st_xmax"), + def(StXMin.class, StXMin::new, "st_xmin"), + def(StYMax.class, StYMax::new, "st_ymax"), + def(StYMin.class, StYMin::new, "st_ymin"), def(StX.class, StX::new, "st_x"), def(StY.class, StY::new, "st_y") }, // conditional @@ -404,7 +414,7 @@ private static FunctionDefinition[][] functions() { def(MvSum.class, MvSum::new, "mv_sum"), def(Split.class, Split::new, "split") }, // fulltext functions - new FunctionDefinition[] { def(Match.class, Match::new, "match"), def(QueryString.class, QueryString::new, "qstr") } }; + new FunctionDefinition[] { def(Match.class, bi(Match::new), "match"), def(QueryString.class, uni(QueryString::new), "qstr") } }; } @@ -414,9 +424,9 @@ private static FunctionDefinition[][] snapshotFunctions() { // The delay() function is for debug/snapshot environments only and should never be enabled in a non-snapshot build. // This is an experimental function and can be removed without notice. 
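Two parts of this change stay behind snapshot-only gates: the JOIN_LOOKUP_V5 capability is constructed with Build.current().isSnapshot(), and kql/term are only registered through snapshotFunctions(). A minimal sketch of that gating pattern, with a hypothetical holder class:

import org.elasticsearch.Build;

// The snapshot-only pattern used twice in this change: a capability flag is
// computed from the build type, and experimental registrations only run on
// snapshot builds. The holder class and method names here are illustrative.
final class SnapshotGateSketch {
    static final boolean EXPERIMENTAL_ENABLED = Build.current().isSnapshot();

    static void registerExperimental(Runnable registration) {
        if (EXPERIMENTAL_ENABLED) {
            registration.run(); // e.g. def(Kql.class, uni(Kql::new), "kql")
        }
    }
}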
def(Delay.class, Delay::new, "delay"), - def(Kql.class, Kql::new, "kql"), + def(Kql.class, uni(Kql::new), "kql"), def(Rate.class, Rate::withUnresolvedTimestamp, "rate"), - def(Term.class, Term::new, "term") } }; + def(Term.class, bi(Term::new), "term") } }; } public EsqlFunctionRegistry snapshotRegistry() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextFunction.java index 78dc05af8f342..432d2d5f07429 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextFunction.java @@ -8,14 +8,21 @@ package org.elasticsearch.xpack.esql.expression.function.fulltext; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Nullability; +import org.elasticsearch.xpack.esql.core.expression.TranslationAware; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.expression.function.Function; +import org.elasticsearch.xpack.esql.core.planner.ExpressionTranslator; +import org.elasticsearch.xpack.esql.core.planner.TranslatorHandler; +import org.elasticsearch.xpack.esql.core.querydsl.query.Query; +import org.elasticsearch.xpack.esql.core.querydsl.query.TranslationAwareExpressionQuery; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import java.util.List; +import java.util.Objects; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isNotNullAndFoldable; @@ -26,13 +33,15 @@ * These functions needs to be pushed down to Lucene queries to be executed - there's no Evaluator for them, but depend on * {@link org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer} to rewrite them into Lucene queries. 
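The FullTextFunction changes below make every full-text function TranslationAware: asQuery() prefers a pre-attached QueryBuilder and only falls back to a per-function translator when none is set. One plausible caller of the new replaceQueryBuilder hook, sketched here under the assumption that a planner rewrite translates once and re-attaches the result (the rewrite rule itself is not shown in this diff):

import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.xpack.esql.core.expression.Expression;
import org.elasticsearch.xpack.esql.expression.function.fulltext.FullTextFunction;

// Assumed planner-side use of the new hook (the surrounding rewrite rule is
// hypothetical): translate a full-text function to a QueryBuilder once, then
// attach it so asQuery(...) short-circuits instead of re-translating.
final class AttachQueryBuilderSketch {
    static Expression attachPrebuilt(FullTextFunction function, QueryBuilder prebuilt) {
        return function.replaceQueryBuilder(prebuilt);
    }
}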
*/ -public abstract class FullTextFunction extends Function { +public abstract class FullTextFunction extends Function implements TranslationAware { private final Expression query; + private final QueryBuilder queryBuilder; - protected FullTextFunction(Source source, Expression query, List children) { + protected FullTextFunction(Source source, Expression query, List children, QueryBuilder queryBuilder) { super(source, children); this.query = query; + this.queryBuilder = queryBuilder; } @Override @@ -116,4 +125,37 @@ public Nullability nullable() { public String functionType() { return "function"; } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), queryBuilder); + } + + @Override + public boolean equals(Object obj) { + if (false == super.equals(obj)) { + return false; + } + + return Objects.equals(queryBuilder, ((FullTextFunction) obj).queryBuilder); + } + + @Override + public Query asQuery(TranslatorHandler translatorHandler) { + if (queryBuilder != null) { + return new TranslationAwareExpressionQuery(source(), queryBuilder); + } + + ExpressionTranslator translator = translator(); + return translator.translate(this, translatorHandler); + } + + public QueryBuilder queryBuilder() { + return queryBuilder; + } + + @SuppressWarnings("rawtypes") + protected abstract ExpressionTranslator translator(); + + public abstract Expression replaceQueryBuilder(QueryBuilder queryBuilder); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Kql.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Kql.java index c03902373c02e..1f7bcadd259a0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Kql.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Kql.java @@ -7,16 +7,20 @@ package org.elasticsearch.xpack.esql.expression.function.fulltext; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.planner.ExpressionTranslator; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.planner.EsqlExpressionTranslators; import org.elasticsearch.xpack.esql.querydsl.query.KqlQuery; import java.io.IOException; @@ -26,7 +30,7 @@ * Full text function that performs a {@link KqlQuery} . */ public class Kql extends FullTextFunction { - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Kql", Kql::new); + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Kql", Kql::readFrom); @FunctionInfo( returnType = "boolean", @@ -42,17 +46,30 @@ public Kql( description = "Query string in KQL query string format." 
) Expression queryString ) { - super(source, queryString, List.of(queryString)); + super(source, queryString, List.of(queryString), null); } - private Kql(StreamInput in) throws IOException { - this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class)); + public Kql(Source source, Expression queryString, QueryBuilder queryBuilder) { + super(source, queryString, List.of(queryString), queryBuilder); + } + + private static Kql readFrom(StreamInput in) throws IOException { + Source source = Source.readFrom((PlanStreamInput) in); + Expression query = in.readNamedWriteable(Expression.class); + QueryBuilder queryBuilder = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS)) { + queryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); + } + return new Kql(source, query, queryBuilder); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); out.writeNamedWriteable(query()); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS)) { + out.writeOptionalNamedWriteable(queryBuilder()); + } } @Override @@ -62,12 +79,21 @@ public String getWriteableName() { @Override public Expression replaceChildren(List newChildren) { - return new Kql(source(), newChildren.get(0)); + return new Kql(source(), newChildren.get(0), queryBuilder()); } @Override protected NodeInfo info() { - return NodeInfo.create(this, Kql::new, query()); + return NodeInfo.create(this, Kql::new, query(), queryBuilder()); } + @Override + protected ExpressionTranslator translator() { + return new EsqlExpressionTranslators.KqlFunctionTranslator(); + } + + @Override + public Expression replaceQueryBuilder(QueryBuilder queryBuilder) { + return new Kql(source(), query(), queryBuilder); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java index 2b9a7c73a5853..0b2268fe1b022 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java @@ -8,15 +8,18 @@ package org.elasticsearch.xpack.esql.expression.function.fulltext; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.esql.capabilities.Validatable; import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.planner.ExpressionTranslator; import org.elasticsearch.xpack.esql.core.querydsl.query.QueryStringQuery; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -27,6 +30,7 @@ import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import 
org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.planner.EsqlExpressionTranslators; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; import java.io.IOException; @@ -109,7 +113,11 @@ public Match( description = "Value to find in the provided field." ) Expression matchQuery ) { - super(source, matchQuery, List.of(field, matchQuery)); + this(source, field, matchQuery, null); + } + + public Match(Source source, Expression field, Expression matchQuery, QueryBuilder queryBuilder) { + super(source, matchQuery, List.of(field, matchQuery), queryBuilder); this.field = field; } @@ -117,7 +125,11 @@ private static Match readFrom(StreamInput in) throws IOException { Source source = Source.readFrom((PlanStreamInput) in); Expression field = in.readNamedWriteable(Expression.class); Expression query = in.readNamedWriteable(Expression.class); - return new Match(source, field, query); + QueryBuilder queryBuilder = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS)) { + queryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); + } + return new Match(source, field, query, queryBuilder); } @Override @@ -125,6 +137,9 @@ public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); out.writeNamedWriteable(field()); out.writeNamedWriteable(query()); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS)) { + out.writeOptionalNamedWriteable(queryBuilder()); + } } @Override @@ -224,12 +239,12 @@ public Object queryAsObject() { @Override public Expression replaceChildren(List newChildren) { - return new Match(source(), newChildren.get(0), newChildren.get(1)); + return new Match(source(), newChildren.get(0), newChildren.get(1), queryBuilder()); } @Override protected NodeInfo info() { - return NodeInfo.create(this, Match::new, field, query()); + return NodeInfo.create(this, Match::new, field, query(), queryBuilder()); } protected TypeResolutions.ParamOrdinal queryParamOrdinal() { @@ -245,6 +260,16 @@ public String functionType() { return isOperator() ? "operator" : super.functionType(); } + @Override + protected ExpressionTranslator translator() { + return new EsqlExpressionTranslators.MatchFunctionTranslator(); + } + + @Override + public Expression replaceQueryBuilder(QueryBuilder queryBuilder) { + return new Match(source(), field, query(), queryBuilder); + } + @Override public String functionName() { return isOperator() ? 
":" : super.functionName(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryString.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryString.java index bd79661534b76..ea21411d09173 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryString.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryString.java @@ -7,10 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.fulltext; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.planner.ExpressionTranslator; import org.elasticsearch.xpack.esql.core.querydsl.query.QueryStringQuery; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -18,6 +21,7 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.planner.EsqlExpressionTranslators; import java.io.IOException; import java.util.List; @@ -27,7 +31,11 @@ */ public class QueryString extends FullTextFunction { - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "QStr", QueryString::new); + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "QStr", + QueryString::readFrom + ); @FunctionInfo( returnType = "boolean", @@ -44,17 +52,30 @@ public QueryString( description = "Query string in Lucene query string format." 
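Kql, Match, QueryString and Term all gain the same wire-format guard: the optional QueryBuilder only goes on the wire when the transport version knows about it, which keeps mixed-version clusters working during upgrades. The shared read/write pair, extracted here purely for illustration:

import java.io.IOException;

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;

// The guard repeated in Kql, Match, QueryString and Term: only versions that
// know about ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS read or write the optional
// QueryBuilder; older peers simply never see the extra field.
final class OptionalQueryBuilderWire {
    static QueryBuilder read(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS)) {
            return in.readOptionalNamedWriteable(QueryBuilder.class);
        }
        return null; // an older node never wrote it
    }

    static void write(StreamOutput out, QueryBuilder queryBuilder) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS)) {
            out.writeOptionalNamedWriteable(queryBuilder);
        }
    }
}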
) Expression queryString ) { - super(source, queryString, List.of(queryString)); + super(source, queryString, List.of(queryString), null); } - private QueryString(StreamInput in) throws IOException { - this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class)); + public QueryString(Source source, Expression queryString, QueryBuilder queryBuilder) { + super(source, queryString, List.of(queryString), queryBuilder); + } + + private static QueryString readFrom(StreamInput in) throws IOException { + Source source = Source.readFrom((PlanStreamInput) in); + Expression query = in.readNamedWriteable(Expression.class); + QueryBuilder queryBuilder = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS)) { + queryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); + } + return new QueryString(source, query, queryBuilder); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); out.writeNamedWriteable(query()); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS)) { + out.writeOptionalNamedWriteable(queryBuilder()); + } } @Override @@ -69,12 +90,21 @@ public String functionName() { @Override public Expression replaceChildren(List newChildren) { - return new QueryString(source(), newChildren.get(0)); + return new QueryString(source(), newChildren.get(0), queryBuilder()); } @Override protected NodeInfo info() { - return NodeInfo.create(this, QueryString::new, query()); + return NodeInfo.create(this, QueryString::new, query(), queryBuilder()); } + @Override + protected ExpressionTranslator translator() { + return new EsqlExpressionTranslators.QueryStringFunctionTranslator(); + } + + @Override + public Expression replaceQueryBuilder(QueryBuilder queryBuilder) { + return new QueryString(source(), query(), queryBuilder); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Term.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Term.java index 125a5b02b6e1c..ff8085cd1b44b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Term.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Term.java @@ -7,15 +7,18 @@ package org.elasticsearch.xpack.esql.expression.function.fulltext; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.esql.capabilities.Validatable; import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.planner.ExpressionTranslator; import org.elasticsearch.xpack.esql.core.querydsl.query.TermQuery; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -23,6 +26,7 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import 
org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.planner.EsqlExpressionTranslators; import java.io.IOException; import java.util.List; @@ -56,7 +60,11 @@ public Term( description = "Term you wish to find in the provided field." ) Expression termQuery ) { - super(source, termQuery, List.of(field, termQuery)); + this(source, field, termQuery, null); + } + + public Term(Source source, Expression field, Expression termQuery, QueryBuilder queryBuilder) { + super(source, termQuery, List.of(field, termQuery), queryBuilder); this.field = field; } @@ -64,7 +72,11 @@ private static Term readFrom(StreamInput in) throws IOException { Source source = Source.readFrom((PlanStreamInput) in); Expression field = in.readNamedWriteable(Expression.class); Expression query = in.readNamedWriteable(Expression.class); - return new Term(source, field, query); + QueryBuilder queryBuilder = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS)) { + queryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); + } + return new Term(source, field, query, queryBuilder); } @Override @@ -72,6 +84,9 @@ public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); out.writeNamedWriteable(field()); out.writeNamedWriteable(query()); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS)) { + out.writeOptionalNamedWriteable(queryBuilder()); + } } @Override @@ -101,18 +116,28 @@ public void validate(Failures failures) { @Override public Expression replaceChildren(List newChildren) { - return new Term(source(), newChildren.get(0), newChildren.get(1)); + return new Term(source(), newChildren.get(0), newChildren.get(1), queryBuilder()); } @Override protected NodeInfo info() { - return NodeInfo.create(this, Term::new, field, query()); + return NodeInfo.create(this, Term::new, field, query(), queryBuilder()); } protected TypeResolutions.ParamOrdinal queryParamOrdinal() { return SECOND; } + @Override + protected ExpressionTranslator translator() { + return new EsqlExpressionTranslators.TermFunctionTranslator(); + } + + @Override + public Expression replaceQueryBuilder(QueryBuilder queryBuilder) { + return new Term(source(), field, query(), queryBuilder); + } + public Expression field() { return field; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java index ded913a78bdf1..a100dd64915f1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java @@ -95,7 +95,8 @@ public boolean foldable() { @Override public Nullability nullable() { - // Both nulls and empty strings result in null values + // Null strings and strings that don't produce tokens after analysis lead to null values. + // This includes empty strings, only whitespace, (hexa)decimal numbers and stopwords. 
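Related to the Verifier change earlier in this diff: CATEGORIZE is now legal alongside other groupings as long as it comes first, because only groupings after the first (subList(1, n)) are scanned for violations. A toy model of that scan, with groupings simplified to plain strings:

import java.util.List;

// Toy model of the Verifier rule: only groupings after the first are scanned,
// so CATEGORIZE in position 0 is accepted. Representing groupings as strings
// is an illustration-only simplification.
final class CategorizeFirstGroupingRule {
    static List<String> violations(List<String> groupings) {
        return groupings.subList(1, groupings.size()).stream()
            .filter(g -> g.startsWith("CATEGORIZE"))
            .map(g -> "CATEGORIZE grouping function [" + g + "] can only be in the first grouping expression")
            .toList();
    }
}

With this model, violations(List.of("CATEGORIZE(message)", "host")) is empty, while swapping the two entries yields one failure, matching the new error message added to the Verifier.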
return Nullability.TRUE; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelope.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelope.java new file mode 100644 index 0000000000000..934991f3a8088 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelope.java @@ -0,0 +1,138 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_SHAPE; +import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; +import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_SHAPE; +import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; +import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; +import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isSpatial; + +/** + * Determines the minimum bounding rectangle of a geometry. + * The function `st_envelope` is defined in the OGC Simple Feature Access standard. + * Alternatively it is well described in PostGIS documentation at + * PostGIS:ST_ENVELOPE. + */ +public class StEnvelope extends UnaryScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "StEnvelope", + StEnvelope::new + ); + private DataType dataType; + + @FunctionInfo( + returnType = { "geo_shape", "cartesian_shape" }, + description = "Determines the minimum bounding box of the supplied geometry.", + examples = @Example(file = "spatial_shapes", tag = "st_envelope") + ) + public StEnvelope( + Source source, + @Param( + name = "geometry", + type = { "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" }, + description = "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. " + + "If `null`, the function returns `null`." 
+ ) Expression field + ) { + super(source, field); + } + + private StEnvelope(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + protected TypeResolution resolveType() { + var resolution = isSpatial(field(), sourceText(), TypeResolutions.ParamOrdinal.DEFAULT); + if (resolution.resolved()) { + this.dataType = switch (field().dataType()) { + case GEO_POINT, GEO_SHAPE -> GEO_SHAPE; + case CARTESIAN_POINT, CARTESIAN_SHAPE -> CARTESIAN_SHAPE; + default -> NULL; + }; + } + return resolution; + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { + if (field().dataType() == GEO_POINT || field().dataType() == DataType.GEO_SHAPE) { + return new StEnvelopeFromWKBGeoEvaluator.Factory(toEvaluator.apply(field()), source()); + } + return new StEnvelopeFromWKBEvaluator.Factory(toEvaluator.apply(field()), source()); + } + + @Override + public DataType dataType() { + return dataType; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new StEnvelope(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StEnvelope::new, field()); + } + + @ConvertEvaluator(extraName = "FromWKB", warnExceptions = { IllegalArgumentException.class }) + static BytesRef fromWellKnownBinary(BytesRef wkb) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point) { + return wkb; + } + var envelope = SpatialEnvelopeVisitor.visitCartesian(geometry); + if (envelope.isPresent()) { + return UNSPECIFIED.asWkb(envelope.get()); + } + throw new IllegalArgumentException("Cannot determine envelope of geometry"); + } + + @ConvertEvaluator(extraName = "FromWKBGeo", warnExceptions = { IllegalArgumentException.class }) + static BytesRef fromWellKnownBinaryGeo(BytesRef wkb) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point) { + return wkb; + } + var envelope = SpatialEnvelopeVisitor.visitGeo(geometry, true); + if (envelope.isPresent()) { + return UNSPECIFIED.asWkb(envelope.get()); + } + throw new IllegalArgumentException("Cannot determine envelope of geometry"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMax.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMax.java new file mode 100644 index 0000000000000..d6d710b175113 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMax.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
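StEnvelope above short-circuits points (a point is its own envelope) and otherwise reduces the geometry to its minimum bounding rectangle. The following standalone sketch shows what that computes for a simple ring; the LinearRing and Polygon constructors used to build the test geometry are assumptions about the geometry library, not part of this diff.

import org.elasticsearch.geometry.Geometry;
import org.elasticsearch.geometry.LinearRing;
import org.elasticsearch.geometry.Polygon;
import org.elasticsearch.geometry.Rectangle;
import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor;

// Illustration of what the StEnvelope conversion computes. The constructors
// below (LinearRing over x/y arrays, Polygon over a ring) are assumed test
// helpers, not the production entry point.
final class EnvelopeSketch {
    public static void main(String[] args) {
        double[] x = { 0, 10, 10, 0, 0 };
        double[] y = { 0, 0, 5, 5, 0 };
        Geometry polygon = new Polygon(new LinearRing(x, y));
        Rectangle envelope = SpatialEnvelopeVisitor.visitCartesian(polygon).orElseThrow();
        // For this ring the envelope is x in [0, 10] and y in [0, 5].
        System.out.println("x: " + envelope.getMinX() + ".." + envelope.getMaxX());
        System.out.println("y: " + envelope.getMinY() + ".." + envelope.getMaxY());
    }
}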
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; +import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; +import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isSpatial; + +/** + * Determines the maximum value of the x-coordinate from a geometry. + * The function `st_xmax` is defined in the OGC Simple Feature Access standard. + * Alternatively it is well described in PostGIS documentation at PostGIS:ST_XMAX. + */ +public class StXMax extends UnaryScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "StXMax", StXMax::new); + + @FunctionInfo( + returnType = "double", + description = "Extracts the maximum value of the `x` coordinates from the supplied geometry.\n" + + "If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `longitude` value.", + examples = @Example(file = "spatial_shapes", tag = "st_x_y_min_max") + ) + public StXMax( + Source source, + @Param( + name = "point", + type = { "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" }, + description = "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. " + + "If `null`, the function returns `null`." 
+ ) Expression field + ) { + super(source, field); + } + + private StXMax(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + protected TypeResolution resolveType() { + return isSpatial(field(), sourceText(), TypeResolutions.ParamOrdinal.DEFAULT); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { + if (field().dataType() == GEO_POINT || field().dataType() == DataType.GEO_SHAPE) { + return new StXMaxFromWKBGeoEvaluator.Factory(toEvaluator.apply(field()), source()); + } + return new StXMaxFromWKBEvaluator.Factory(toEvaluator.apply(field()), source()); + } + + @Override + public DataType dataType() { + return DOUBLE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new StXMax(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StXMax::new, field()); + } + + @ConvertEvaluator(extraName = "FromWKB", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinary(BytesRef wkb) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getX(); + } + var envelope = SpatialEnvelopeVisitor.visitCartesian(geometry); + if (envelope.isPresent()) { + return envelope.get().getMaxX(); + } + throw new IllegalArgumentException("Cannot determine envelope of geometry"); + } + + @ConvertEvaluator(extraName = "FromWKBGeo", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinaryGeo(BytesRef wkb) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getX(); + } + var envelope = SpatialEnvelopeVisitor.visitGeo(geometry, true); + if (envelope.isPresent()) { + return envelope.get().getMaxX(); + } + throw new IllegalArgumentException("Cannot determine envelope of geometry"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMin.java new file mode 100644 index 0000000000000..a5fa11bc11b0f --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMin.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; +import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; +import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isSpatial; + +/** + * Determines the minimum value of the x-coordinate from a geometry. + * The function `st_xmin` is defined in the OGC Simple Feature Access standard. + * Alternatively it is well described in PostGIS documentation at PostGIS:ST_XMIN. + */ +public class StXMin extends UnaryScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "StXMin", StXMin::new); + + @FunctionInfo( + returnType = "double", + description = "Extracts the minimum value of the `x` coordinates from the supplied geometry.\n" + + "If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `longitude` value.", + examples = @Example(file = "spatial_shapes", tag = "st_x_y_min_max") + ) + public StXMin( + Source source, + @Param( + name = "point", + type = { "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" }, + description = "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. " + + "If `null`, the function returns `null`." 
+ ) Expression field + ) { + super(source, field); + } + + private StXMin(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + protected TypeResolution resolveType() { + return isSpatial(field(), sourceText(), TypeResolutions.ParamOrdinal.DEFAULT); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { + if (field().dataType() == GEO_POINT || field().dataType() == DataType.GEO_SHAPE) { + return new StXMinFromWKBGeoEvaluator.Factory(toEvaluator.apply(field()), source()); + } + return new StXMinFromWKBEvaluator.Factory(toEvaluator.apply(field()), source()); + } + + @Override + public DataType dataType() { + return DOUBLE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new StXMin(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StXMin::new, field()); + } + + @ConvertEvaluator(extraName = "FromWKB", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinary(BytesRef wkb) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getX(); + } + var envelope = SpatialEnvelopeVisitor.visitCartesian(geometry); + if (envelope.isPresent()) { + return envelope.get().getMinX(); + } + throw new IllegalArgumentException("Cannot determine envelope of geometry"); + } + + @ConvertEvaluator(extraName = "FromWKBGeo", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinaryGeo(BytesRef wkb) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getX(); + } + var envelope = SpatialEnvelopeVisitor.visitGeo(geometry, true); + if (envelope.isPresent()) { + return envelope.get().getMinX(); + } + throw new IllegalArgumentException("Cannot determine envelope of geometry"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMax.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMax.java new file mode 100644 index 0000000000000..fbbea8e024a6b --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMax.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; +import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; +import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isSpatial; + +/** + * Determines the maximum value of the y-coordinate from a geometry. + * The function `st_ymax` is defined in the OGC Simple Feature Access standard. + * Alternatively it is well described in PostGIS documentation at PostGIS:ST_YMAX. + */ +public class StYMax extends UnaryScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "StYMax", StYMax::new); + + @FunctionInfo( + returnType = "double", + description = "Extracts the maximum value of the `y` coordinates from the supplied geometry.\n" + + "If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `latitude` value.", + examples = @Example(file = "spatial_shapes", tag = "st_x_y_min_max") + ) + public StYMax( + Source source, + @Param( + name = "point", + type = { "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" }, + description = "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. " + + "If `null`, the function returns `null`." 
+ ) Expression field + ) { + super(source, field); + } + + private StYMax(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + protected TypeResolution resolveType() { + return isSpatial(field(), sourceText(), TypeResolutions.ParamOrdinal.DEFAULT); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { + if (field().dataType() == GEO_POINT || field().dataType() == DataType.GEO_SHAPE) { + return new StYMaxFromWKBGeoEvaluator.Factory(toEvaluator.apply(field()), source()); + } + return new StYMaxFromWKBEvaluator.Factory(toEvaluator.apply(field()), source()); + } + + @Override + public DataType dataType() { + return DOUBLE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new StYMax(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StYMax::new, field()); + } + + @ConvertEvaluator(extraName = "FromWKB", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinary(BytesRef wkb) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getY(); + } + var envelope = SpatialEnvelopeVisitor.visitCartesian(geometry); + if (envelope.isPresent()) { + return envelope.get().getMaxY(); + } + throw new IllegalArgumentException("Cannot determine envelope of geometry"); + } + + @ConvertEvaluator(extraName = "FromWKBGeo", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinaryGeo(BytesRef wkb) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getY(); + } + var envelope = SpatialEnvelopeVisitor.visitGeo(geometry, true); + if (envelope.isPresent()) { + return envelope.get().getMaxY(); + } + throw new IllegalArgumentException("Cannot determine envelope of geometry"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMin.java new file mode 100644 index 0000000000000..1707d3b4f2fb9 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMin.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; +import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; +import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isSpatial; + +/** + * Determines the minimum value of the y-coordinate from a geometry. + * The function `st_ymin` is defined in the OGC Simple Feature Access standard. + * Alternatively it is well described in PostGIS documentation at PostGIS:ST_YMIN. + */ +public class StYMin extends UnaryScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "StYMin", StYMin::new); + + @FunctionInfo( + returnType = "double", + description = "Extracts the minimum value of the `y` coordinates from the supplied geometry.\n" + + "If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `latitude` value.", + examples = @Example(file = "spatial_shapes", tag = "st_x_y_min_max") + ) + public StYMin( + Source source, + @Param( + name = "point", + type = { "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" }, + description = "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. " + + "If `null`, the function returns `null`." 
+ ) Expression field + ) { + super(source, field); + } + + private StYMin(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + protected TypeResolution resolveType() { + return isSpatial(field(), sourceText(), TypeResolutions.ParamOrdinal.DEFAULT); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { + if (field().dataType() == GEO_POINT || field().dataType() == DataType.GEO_SHAPE) { + return new StYMinFromWKBGeoEvaluator.Factory(toEvaluator.apply(field()), source()); + } + return new StYMinFromWKBEvaluator.Factory(toEvaluator.apply(field()), source()); + } + + @Override + public DataType dataType() { + return DOUBLE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new StYMin(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StYMin::new, field()); + } + + @ConvertEvaluator(extraName = "FromWKB", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinary(BytesRef wkb) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getY(); + } + var envelope = SpatialEnvelopeVisitor.visitCartesian(geometry); + if (envelope.isPresent()) { + return envelope.get().getMinY(); + } + throw new IllegalArgumentException("Cannot determine envelope of geometry"); + } + + @ConvertEvaluator(extraName = "FromWKBGeo", warnExceptions = { IllegalArgumentException.class }) + static double fromWellKnownBinaryGeo(BytesRef wkb) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getY(); + } + var envelope = SpatialEnvelopeVisitor.visitGeo(geometry, true); + if (envelope.isPresent()) { + return envelope.get().getMinY(); + } + throw new IllegalArgumentException("Cannot determine envelope of geometry"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java index ee51a6f391a65..d3fc9e15e2e04 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java @@ -25,6 +25,9 @@ public class EsIndex implements Writeable { private final Map mapping; private final Map indexNameWithModes; + /** + * Intended for tests. Returns an index with an empty index mode map. 
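// Editor's note (illustration, not part of the change): the two-argument constructor
// documented above simply delegates with an empty index-mode map, so in a test
//   new EsIndex("test", EsqlTestUtils.loadMapping("mapping-basic.json"))
// is equivalent to
//   new EsIndex("test", EsqlTestUtils.loadMapping("mapping-basic.json"), Map.of());
// the mapping resource name is hypothetical here.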
+ */ public EsIndex(String name, Map mapping) { this(name, mapping, Map.of()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index 48bafd8eef00e..1eaade043658b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -57,7 +57,7 @@ protected List> batches() { } protected List> rules(boolean optimizeForEsSource) { - List> esSourceRules = new ArrayList<>(4); + List> esSourceRules = new ArrayList<>(6); esSourceRules.add(new ReplaceSourceAttributes()); if (optimizeForEsSource) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java index 5e91425296822..dce828dbf192d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java @@ -14,7 +14,6 @@ public final class LogicalVerifier { - private static final PlanConsistencyChecker DEPENDENCY_CHECK = new PlanConsistencyChecker<>(); public static final LogicalVerifier INSTANCE = new LogicalVerifier(); private LogicalVerifier() {} @@ -25,7 +24,7 @@ public Failures verify(LogicalPlan plan) { Failures dependencyFailures = new Failures(); plan.forEachUp(p -> { - DEPENDENCY_CHECK.checkPlan(p, dependencyFailures); + PlanConsistencyChecker.checkPlan(p, dependencyFailures); if (failures.hasFailures() == false) { p.forEachExpression(ex -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java index 8bd8aba01fd21..4ec90fc1ed50a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java @@ -8,9 +8,12 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.esql.common.Failure; +import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.optimizer.rules.PlanConsistencyChecker; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -24,17 +27,21 @@ public final class PhysicalVerifier { public static final PhysicalVerifier INSTANCE = new PhysicalVerifier(); - private static final PlanConsistencyChecker DEPENDENCY_CHECK = new PlanConsistencyChecker<>(); private PhysicalVerifier() {} /** Verifies the physical plan. 
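// Editor's note (flow sketch of the verifier change below; names match the diff):
// plan-consistency problems are now hard failures -- an optimizer bug -- while missing
// field extractors remain ordinary, user-reportable failures:
//
//   Failures depFailures = new Failures();
//   plan.forEachDown(p -> PlanConsistencyChecker.checkPlan(p, depFailures));
//   if (depFailures.hasFailures()) {
//       throw new IllegalStateException(depFailures.toString());
//   }
//   return failures; // the Set<Failure> collected while walking the plan
//
// The early return for remote Enrich skips both checks and is a temporary carve-out
// tracked by https://github.com/elastic/elasticsearch/issues/118531.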
*/ public Collection verify(PhysicalPlan plan) { Set failures = new LinkedHashSet<>(); + Failures depFailures = new Failures(); + + // AwaitsFix https://github.com/elastic/elasticsearch/issues/118531 + var enriches = plan.collectFirstChildren(EnrichExec.class::isInstance); + if (enriches.isEmpty() == false && ((EnrichExec) enriches.get(0)).mode() == Enrich.Mode.REMOTE) { + return failures; + } plan.forEachDown(p -> { - // FIXME: re-enable - // DEPENDENCY_CHECK.checkPlan(p, failures); if (p instanceof FieldExtractExec fieldExtractExec) { Attribute sourceAttribute = fieldExtractExec.sourceAttribute(); if (sourceAttribute == null) { @@ -48,8 +55,13 @@ public Collection verify(PhysicalPlan plan) { ); } } + PlanConsistencyChecker.checkPlan(p, depFailures); }); + if (depFailures.hasFailures()) { + throw new IllegalStateException(depFailures.toString()); + } + return failures; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java index 30de8945a4c20..d5bd110e8df74 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PlanConsistencyChecker.java @@ -12,27 +12,42 @@ import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.plan.QueryPlan; +import org.elasticsearch.xpack.esql.plan.logical.BinaryPlan; +import org.elasticsearch.xpack.esql.plan.physical.BinaryExec; import java.util.HashSet; import java.util.Set; import static org.elasticsearch.xpack.esql.common.Failure.fail; -public class PlanConsistencyChecker
<P extends QueryPlan<P>
> { +public class PlanConsistencyChecker { /** * Check whether a single {@link QueryPlan} produces no duplicate attributes and its children provide all of its required * {@link QueryPlan#references() references}. Otherwise, add * {@link org.elasticsearch.xpack.esql.common.Failure Failure}s to the {@link Failures} object. */ - public void checkPlan(P p, Failures failures) { - AttributeSet refs = p.references(); - AttributeSet input = p.inputSet(); - AttributeSet missing = refs.subtract(input); - // TODO: for Joins, we should probably check if the required fields from the left child are actually in the left child, not - // just any child (and analogously for the right child). - if (missing.isEmpty() == false) { - failures.add(fail(p, "Plan [{}] optimized incorrectly due to missing references {}", p.nodeString(), missing)); + public static void checkPlan(QueryPlan p, Failures failures) { + if (p instanceof BinaryPlan binaryPlan) { + checkMissingBinary( + p, + binaryPlan.leftReferences(), + binaryPlan.left().outputSet(), + binaryPlan.rightReferences(), + binaryPlan.right().outputSet(), + failures + ); + } else if (p instanceof BinaryExec binaryExec) { + checkMissingBinary( + p, + binaryExec.leftReferences(), + binaryExec.left().outputSet(), + binaryExec.rightReferences(), + binaryExec.right().outputSet(), + failures + ); + } else { + checkMissing(p, p.references(), p.inputSet(), "missing references", failures); } Set outputAttributeNames = new HashSet<>(); @@ -45,4 +60,29 @@ public void checkPlan(P p, Failures failures) { } } } + + private static void checkMissingBinary( + QueryPlan plan, + AttributeSet leftReferences, + AttributeSet leftInput, + AttributeSet rightReferences, + AttributeSet rightInput, + Failures failures + ) { + checkMissing(plan, leftReferences, leftInput, "missing references from left hand side", failures); + checkMissing(plan, rightReferences, rightInput, "missing references from right hand side", failures); + } + + private static void checkMissing( + QueryPlan plan, + AttributeSet references, + AttributeSet input, + String detailErrorMessage, + Failures failures + ) { + AttributeSet missing = references.subtract(input); + if (missing.isEmpty() == false) { + failures.add(fail(plan, "Plan [{}] optimized incorrectly due to {} {}", plan.nodeString(), detailErrorMessage, missing)); + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java index 15e49c22a44db..9ec902e729f54 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; +import org.elasticsearch.xpack.esql.core.util.CollectionUtils; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Filter; @@ -23,6 +24,8 @@ import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; +import 
org.elasticsearch.xpack.esql.plan.logical.join.Join; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import java.util.ArrayList; import java.util.List; @@ -76,11 +79,63 @@ protected LogicalPlan rule(Filter filter) { } else if (child instanceof OrderBy orderBy) { // swap the filter with its child plan = orderBy.replaceChild(filter.with(orderBy.child(), condition)); + } else if (child instanceof Join join) { + return pushDownPastJoin(filter, join); } // cannot push past a Limit, this could change the tailing result set returned return plan; } + private record ScopedFilter(List commonFilters, List leftFilters, List rightFilters) {} + + // split the filter condition in 3 parts: + // 1. filter scoped to the left + // 2. filter scoped to the right + // 3. filter that requires both sides to be evaluated + private static ScopedFilter scopeFilter(List filters, LogicalPlan left, LogicalPlan right) { + List rest = new ArrayList<>(filters); + List leftFilters = new ArrayList<>(); + List rightFilters = new ArrayList<>(); + + AttributeSet leftOutput = left.outputSet(); + AttributeSet rightOutput = right.outputSet(); + + // first remove things that are left scoped only + rest.removeIf(f -> f.references().subsetOf(leftOutput) && leftFilters.add(f)); + // followed by right scoped only + rest.removeIf(f -> f.references().subsetOf(rightOutput) && rightFilters.add(f)); + return new ScopedFilter(rest, leftFilters, rightFilters); + } + + private static LogicalPlan pushDownPastJoin(Filter filter, Join join) { + LogicalPlan plan = filter; + // pushdown only through LEFT joins + // TODO: generalize this for other join types + if (join.config().type() == JoinTypes.LEFT) { + LogicalPlan left = join.left(); + LogicalPlan right = join.right(); + + // split the filter condition in 3 parts: + // 1. filter scoped to the left + // 2. filter scoped to the right + // 3. filter that requires both sides to be evaluated + ScopedFilter scoped = scopeFilter(Predicates.splitAnd(filter.condition()), left, right); + // push the left scoped filter down to the left child, keep the rest intact + if (scoped.leftFilters.size() > 0) { + // push the filter down to the left child + left = new Filter(left.source(), left, Predicates.combineAnd(scoped.leftFilters)); + // update the join with the new left child + join = (Join) join.replaceLeft(left); + + // keep the remaining filters in place, otherwise return the new join; + Expression remainingFilter = Predicates.combineAnd(CollectionUtils.combine(scoped.commonFilters, scoped.rightFilters)); + plan = remainingFilter != null ? 
filter.with(join, remainingFilter) : join; + } + } + // ignore the rest of the join + return plan; + } + private static Function NO_OP = expression -> expression; private static LogicalPlan maybePushDownPastUnary( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java index fb9d3f7e2f91e..1cacebdf27cd2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java @@ -19,7 +19,6 @@ import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; public final class PushDownAndCombineLimits extends OptimizerRules.OptimizerRule { @@ -63,8 +62,10 @@ public LogicalPlan rule(Limit limit) { } } } else if (limit.child() instanceof Join join) { - if (join.config().type() == JoinTypes.LEFT && join.right() instanceof LocalRelation) { - // This is a hash join from something like a lookup. + if (join.config().type() == JoinTypes.LEFT) { + // NOTE! This is only correct because our LEFT JOINs preserve the number of rows from the left hand side. + // This deviates from SQL semantics. In SQL, multiple matches on the right hand side lead to multiple rows in the output. + // For us, multiple matches on the right hand side are collected into multi-values. return join.replaceChildren(limit.replaceChild(join.left()), join.right()); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java index ed8851b64c27e..61b1554fb71bc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java @@ -11,7 +11,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; -import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.optimizer.rules.physical.ProjectAwayColumns; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; @@ -22,7 +21,6 @@ import java.util.ArrayList; import java.util.LinkedHashSet; -import java.util.LinkedList; import java.util.List; import java.util.Set; @@ -54,18 +52,9 @@ public PhysicalPlan apply(PhysicalPlan plan) { * it loads the field lazily. If we have more than one field we need to * make sure the fields are loaded for the standard hash aggregator. */ - if (p instanceof AggregateExec agg && agg.groupings().size() == 1) { - // CATEGORIZE requires the standard hash aggregator as well. 
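// Editor's note (standalone illustration, not ESQL classes): the three-way split that
// PushDownAndCombineFilters applies to a conjunction on `l LEFT JOIN r`. For
// `l.x > 1 AND r.y > 2 AND l.x > r.y`: `l.x > 1` is left-scoped and is pushed below the
// join (safe, since the LEFT JOIN keeps every left-hand row); `r.y > 2` and `l.x > r.y`
// stay above it. The removeIf-with-add trick mirrors scopeFilter() above.

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

final class ScopedFilterDemo {
    record Pred(String text, Set<String> refs) {}

    /** Partition predicates into [common, left-only, right-only] by the columns they touch. */
    static List<List<Pred>> scope(List<Pred> filters, Set<String> leftCols, Set<String> rightCols) {
        List<Pred> rest = new ArrayList<>(filters);
        List<Pred> left = new ArrayList<>();
        List<Pred> right = new ArrayList<>();
        // List.add always returns true, so each matching predicate is moved, not copied
        rest.removeIf(f -> leftCols.containsAll(f.refs()) && left.add(f));
        rest.removeIf(f -> rightCols.containsAll(f.refs()) && right.add(f));
        return List.of(rest, left, right);
    }
}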
- if (agg.groupings().get(0).anyMatch(e -> e instanceof Categorize) == false) { - var leaves = new LinkedList<>(); - // TODO: this seems out of place - agg.aggregates() - .stream() - .filter(a -> agg.groupings().contains(a) == false) - .forEach(a -> leaves.addAll(a.collectLeaves())); - var remove = agg.groupings().stream().filter(g -> leaves.contains(g) == false).toList(); - missing.removeAll(Expressions.references(remove)); - } + if (p instanceof AggregateExec agg) { + var ordinalAttributes = agg.ordinalAttributes(); + missing.removeAll(Expressions.references(ordinalAttributes)); } // add extractor diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java index 91cd7f7a15840..dbd22dd297f88 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.esql.plan.logical; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.Arrays; @@ -30,6 +31,10 @@ public LogicalPlan right() { return right; } + public abstract AttributeSet leftReferences(); + + public abstract AttributeSet rightReferences(); + @Override public final BinaryPlan replaceChildren(List newChildren) { return replaceChildren(newChildren.get(0), newChildren.get(1)); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java index 6af29fb23b3bb..a2c159e506880 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -97,6 +98,16 @@ public List output() { return lazyOutput; } + @Override + public AttributeSet leftReferences() { + return Expressions.references(config().leftFields()); + } + + @Override + public AttributeSet rightReferences() { + return Expressions.references(config().rightFields()); + } + public List rightOutputFields() { AttributeSet leftInputs = left().outputSet(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java index 891d03c571b27..3c2d49567813c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java @@ -18,10 +18,13 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import 
org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Objects; @@ -181,7 +184,27 @@ public List<Attribute> output() { @Override protected AttributeSet computeReferences() { - return mode.isInputPartial() ? new AttributeSet(intermediateAttributes) : Aggregate.computeReferences(aggregates, groupings); + return mode.isInputPartial() + ? new AttributeSet(intermediateAttributes) + : Aggregate.computeReferences(aggregates, groupings).subtract(new AttributeSet(ordinalAttributes())); + } + + /** Returns the attributes that can be loaded from ordinals -- no explicit extraction is needed */ + public List<Attribute> ordinalAttributes() { + List<Attribute> ordinalAttributes = new ArrayList<>(groupings.size()); + // Ordinals can be leveraged just for a single grouping. If there are multiple groupings, fields need to be loaded for the + // hash aggregator. + // CATEGORIZE requires the standard hash aggregator as well. + if (groupings().size() == 1 && groupings.get(0).anyMatch(e -> e instanceof Categorize) == false) { + var leaves = new HashSet<>(); + aggregates.stream().filter(a -> groupings.contains(a) == false).forEach(a -> leaves.addAll(a.collectLeaves())); + groupings.forEach(g -> { + if (leaves.contains(g) == false) { + ordinalAttributes.add((Attribute) g); + } + }); + } + return ordinalAttributes; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java index 6f200bad17a72..9a1b76205b595 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/BinaryExec.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.plan.physical; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.Source; import java.io.IOException; @@ -40,6 +41,10 @@ public PhysicalPlan right() { return right; } + public abstract AttributeSet leftReferences(); + + public abstract AttributeSet rightReferences(); + @Override public void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeExec.java index 5530b3ea54d3d..d1d834b71047a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeExec.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -72,6 +73,12 @@ public boolean inBetweenAggs() { return inBetweenAggs; } + @Override + protected AttributeSet computeReferences() { + // ExchangeExec does no input referencing, it only outputs all synthetic attributes,
"sourced" from remote exchanges. + return AttributeSet.EMPTY; + } + @Override public UnaryExec replaceChild(PhysicalPlan newChild) { return new ExchangeExec(source(), output, inBetweenAggs, newChild); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java index 35c6e4846bd88..ec996c5c84064 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java @@ -89,12 +89,7 @@ public static Attribute extractSourceAttributesFrom(PhysicalPlan plan) { @Override protected AttributeSet computeReferences() { - AttributeSet required = new AttributeSet(docValuesAttributes); - - required.add(sourceAttribute); - required.addAll(attributesToExtract); - - return required; + return sourceAttribute != null ? new AttributeSet(sourceAttribute) : AttributeSet.EMPTY; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java index 5ae3702993fcb..362c83bf76213 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java @@ -119,6 +119,16 @@ protected AttributeSet computeReferences() { return Expressions.references(leftFields); } + @Override + public AttributeSet leftReferences() { + return Expressions.references(leftFields); + } + + @Override + public AttributeSet rightReferences() { + return Expressions.references(rightFields); + } + @Override public HashJoinExec replaceChildren(PhysicalPlan left, PhysicalPlan right) { return new HashJoinExec(source(), left, right, matchFields, leftFields, rightFields, output); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java index 8b1cc047309e7..2aff38993aa98 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java @@ -93,9 +93,9 @@ public List addedFields() { public List output() { if (lazyOutput == null) { lazyOutput = new ArrayList<>(left().output()); - for (Attribute attr : addedFields) { - lazyOutput.add(attr); - } + var addedFieldsNames = addedFields.stream().map(Attribute::name).toList(); + lazyOutput.removeIf(a -> addedFieldsNames.contains(a.name())); + lazyOutput.addAll(addedFields); } return lazyOutput; } @@ -119,6 +119,21 @@ protected AttributeSet computeReferences() { return Expressions.references(leftFields); } + @Override + public AttributeSet leftReferences() { + return Expressions.references(leftFields); + } + + @Override + public AttributeSet rightReferences() { + // TODO: currently it's hard coded that we add all fields from the lookup index. But the output we "officially" get from the right + // hand side is inconsistent: + // - After logical optimization, there's a FragmentExec with an EsRelation on the right hand side with all the fields. 
+ // - After local physical optimization, there's just an EsQueryExec here, with no fields other than _doc mentioned and we don't + // insert field extractions in the plan, either. + return AttributeSet.EMPTY; + } + @Override public LookupJoinExec replaceChildren(PhysicalPlan left, PhysicalPlan right) { return new LookupJoinExec(source(), left, right, leftFields, rightFields, addedFields); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java index 35aba7665ec87..57ba1c8016feb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java @@ -297,9 +297,9 @@ private void aggregatesToFactory( // coordinator/exchange phase else if (mode == AggregatorMode.FINAL || mode == AggregatorMode.INTERMEDIATE) { if (grouping) { - sourceAttr = aggregateMapper.mapGrouping(aggregateFunction); + sourceAttr = aggregateMapper.mapGrouping(ne); } else { - sourceAttr = aggregateMapper.mapNonGrouping(aggregateFunction); + sourceAttr = aggregateMapper.mapNonGrouping(ne); } } else { throw new EsqlIllegalArgumentException("illegal aggregation mode"); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index 41a6a17a50dcb..138165bd4f0bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; @@ -91,7 +92,7 @@ final class AggregateMapper { private record AggDef(Class aggClazz, String type, String extra, boolean grouping) {} /** Map of AggDef types to intermediate named expressions. 
*/ - private static final Map> mapper = AGG_FUNCTIONS.stream() + private static final Map> MAPPER = AGG_FUNCTIONS.stream() .flatMap(AggregateMapper::typeAndNames) .flatMap(AggregateMapper::groupingAndNonGrouping) .collect(Collectors.toUnmodifiableMap(aggDef -> aggDef, AggregateMapper::lookupIntermediateState)); @@ -103,50 +104,57 @@ private record AggDef(Class aggClazz, String type, String extra, boolean grou cache = new HashMap<>(); } - public List mapNonGrouping(List aggregates) { + public List mapNonGrouping(List aggregates) { return doMapping(aggregates, false); } - public List mapNonGrouping(Expression aggregate) { + public List mapNonGrouping(NamedExpression aggregate) { return map(aggregate, false).toList(); } - public List mapGrouping(List aggregates) { + public List mapGrouping(List aggregates) { return doMapping(aggregates, true); } - private List doMapping(List aggregates, boolean grouping) { + private List doMapping(List aggregates, boolean grouping) { AttributeMap attrToExpressions = new AttributeMap<>(); - aggregates.stream().flatMap(agg -> map(agg, grouping)).forEach(ne -> attrToExpressions.put(ne.toAttribute(), ne)); + aggregates.stream().flatMap(ne -> map(ne, grouping)).forEach(ne -> attrToExpressions.put(ne.toAttribute(), ne)); return attrToExpressions.values().stream().toList(); } - public List mapGrouping(Expression aggregate) { + public List mapGrouping(NamedExpression aggregate) { return map(aggregate, true).toList(); } - private Stream map(Expression aggregate, boolean grouping) { - return cache.computeIfAbsent(Alias.unwrap(aggregate), aggKey -> computeEntryForAgg(aggKey, grouping)).stream(); + private Stream map(NamedExpression ne, boolean grouping) { + return cache.computeIfAbsent(Alias.unwrap(ne), aggKey -> computeEntryForAgg(ne.name(), aggKey, grouping)).stream(); } - private static List computeEntryForAgg(Expression aggregate, boolean grouping) { - var aggDef = aggDefOrNull(aggregate, grouping); - if (aggDef != null) { - var is = getNonNull(aggDef); - var exp = isToNE(is).toList(); - return exp; + private static List computeEntryForAgg(String aggAlias, Expression aggregate, boolean grouping) { + if (aggregate instanceof AggregateFunction aggregateFunction) { + return entryForAgg(aggAlias, aggregateFunction, grouping); } if (aggregate instanceof FieldAttribute || aggregate instanceof MetadataAttribute || aggregate instanceof ReferenceAttribute) { - // This condition is a little pedantic, but do we expected other expressions here? if so, then add them + // This condition is a little pedantic, but do we expect other expressions here? if so, then add them return List.of(); - } else { - throw new EsqlIllegalArgumentException("unknown agg: " + aggregate.getClass() + ": " + aggregate); } + throw new EsqlIllegalArgumentException("unknown agg: " + aggregate.getClass() + ": " + aggregate); + } + + private static List entryForAgg(String aggAlias, AggregateFunction aggregateFunction, boolean grouping) { + var aggDef = new AggDef( + aggregateFunction.getClass(), + dataTypeToString(aggregateFunction.field().dataType(), aggregateFunction.getClass()), + aggregateFunction instanceof SpatialCentroid ? 
"SourceValues" : "", + grouping + ); + var is = getNonNull(aggDef); + return isToNE(is, aggAlias).toList(); } /** Gets the agg from the mapper - wrapper around map::get for more informative failure.*/ private static List getNonNull(AggDef aggDef) { - var l = mapper.get(aggDef); + var l = MAPPER.get(aggDef); if (l == null) { throw new EsqlIllegalArgumentException("Cannot find intermediate state for: " + aggDef); } @@ -199,18 +207,6 @@ private static Stream groupingAndNonGrouping(Tuple, Tuple lookupIntermediateState(AggDef aggDef) { try { @@ -257,7 +253,7 @@ private static String determinePackageName(Class clazz) { } /** Maps intermediate state description to named expressions. */ - private static Stream isToNE(List intermediateStateDescs) { + private static Stream isToNE(List intermediateStateDescs, String aggAlias) { return intermediateStateDescs.stream().map(is -> { final DataType dataType; if (Strings.isEmpty(is.dataType())) { @@ -265,7 +261,7 @@ private static Stream isToNE(List interm } else { dataType = DataType.fromEs(is.dataType()); } - return new ReferenceAttribute(Source.EMPTY, is.name(), dataType); + return new ReferenceAttribute(Source.EMPTY, Attribute.rawTemporaryName(aggAlias, is.name()), dataType); }); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java index 7820f0f657f7f..43bbf9a5f4ff1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.TranslationAware; import org.elasticsearch.xpack.esql.core.expression.TypedAttribute; import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.Range; @@ -100,7 +101,11 @@ public final class EsqlExpressionTranslators { ); public static Query toQuery(Expression e, TranslatorHandler handler) { + if (e instanceof TranslationAware ta) { + return ta.asQuery(handler); + } Query translation = null; + for (ExpressionTranslator translator : QUERY_TRANSLATORS) { translation = translator.translate(e, handler); if (translation != null) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java index 6014e24e39c5f..b6fa82360d1e8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java @@ -19,6 +19,8 @@ import org.elasticsearch.compute.data.BlockStreamInput; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -32,17 +34,24 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.List; import 
java.util.Map; import java.util.Objects; +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField.NO_INDEX_PLACEHOLDER; +import static org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField.NO_INDICES_OR_ALIASES_ARRAY; + final class DataNodeRequest extends TransportRequest implements IndicesRequest.Replaceable { + private static final Logger logger = LogManager.getLogger(DataNodeRequest.class); + private final String sessionId; private final Configuration configuration; private final String clusterAlias; - private final List shardIds; private final Map aliasFilters; private final PhysicalPlan plan; + private List shardIds; private String[] indices; private final IndicesOptions indicesOptions; @@ -115,6 +124,10 @@ public String[] indices() { @Override public IndicesRequest indices(String... indices) { this.indices = indices; + if (Arrays.equals(NO_INDICES_OR_ALIASES_ARRAY, indices) || Arrays.asList(indices).contains(NO_INDEX_PLACEHOLDER)) { + logger.trace(() -> format("Indices empty after index resolution, also clearing shardIds %s", shardIds)); + this.shardIds = Collections.emptyList(); + } return this; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlLicenseChecker.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlLicenseChecker.java new file mode 100644 index 0000000000000..0a52ee75de3b2 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlLicenseChecker.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.session; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicensedFeature; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; + +public class EsqlLicenseChecker { + + public static final LicensedFeature.Momentary CCS_FEATURE = LicensedFeature.momentary( + null, + "esql-ccs", + License.OperationMode.ENTERPRISE + ); + + /** + * Only call this method once you know the user is doing a cross-cluster query, as it will update + * the license_usage timestamp for the esql-ccs feature if the license is Enterprise (or Trial). + * @param licenseState + * @return true if the user has a license that allows ESQL CCS. + */ + public static boolean isCcsAllowed(XPackLicenseState licenseState) { + if (licenseState == null) { + return false; + } + return CCS_FEATURE.check(licenseState); + } + + /** + * @param licenseState existing license state. Need to extract info on the current installed license. + * @return ElasticsearchStatusException with an error message informing the caller what license is needed + * to run ES|QL cross-cluster searches and what license (if any) was found. + */ + public static ElasticsearchStatusException invalidLicenseForCcsException(XPackLicenseState licenseState) { + String message = "A valid Enterprise license is required to run ES|QL cross-cluster searches. 
License found: "; + if (licenseState == null) { + message += "none"; + } else { + message += licenseState.statusDescription(); + } + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 4f7c620bc8d12..83480f6651abf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -298,6 +298,9 @@ public void analyzedPlan( .map(e -> new EnrichPolicyResolver.UnresolvedPolicy((String) e.policyName().fold(), e.mode())) .collect(Collectors.toSet()); final List indices = preAnalysis.indices; + + EsqlSessionCCSUtils.checkForCcsLicense(indices, indicesExpressionGrouper, verifier.licenseState()); + // TODO: make a separate call for lookup indices final Set targetClusters = enrichPolicyResolver.groupIndicesPerCluster( indices.stream().flatMap(t -> Arrays.stream(Strings.commaDelimitedListToStringArray(t.id().index()))).toArray(String[]::new) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java index 4fe2fef7e3f45..662572c466511 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java @@ -9,17 +9,24 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.indices.IndicesExpressionGrouper; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.analysis.Analyzer; +import org.elasticsearch.xpack.esql.analysis.TableInfo; import org.elasticsearch.xpack.esql.index.IndexResolution; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; @@ -255,6 +262,9 @@ static boolean missingIndicesIsFatal(String clusterAlias, EsqlExecutionInfo exec } private static boolean concreteIndexRequested(String indexExpression) { + if (Strings.isNullOrBlank(indexExpression)) { + return false; + } for (String expr : indexExpression.split(",")) { if (expr.charAt(0) == '<' || expr.startsWith("-<")) { // skip date math expressions @@ -288,4 +298,37 @@ static void updateExecutionInfoAtEndOfPlanning(EsqlExecutionInfo execInfo) { } } } + + /** + * Checks the index expression for the presence of remote clusters. If found, it will ensure that the caller + * has a valid Enterprise (or Trial) license on the querying cluster. 
+ * @param indices index expression requested by user + * @param indicesGrouper grouper of index expressions by cluster alias + * @param licenseState license state on the querying cluster + * @throws org.elasticsearch.ElasticsearchStatusException if the license is not valid (or present) for ES|QL CCS search. + */ + public static void checkForCcsLicense( + List indices, + IndicesExpressionGrouper indicesGrouper, + XPackLicenseState licenseState + ) { + for (TableInfo tableInfo : indices) { + Map groupedIndices; + try { + groupedIndices = indicesGrouper.groupIndices(IndicesOptions.DEFAULT, tableInfo.id().index()); + } catch (NoSuchRemoteClusterException e) { + if (EsqlLicenseChecker.isCcsAllowed(licenseState)) { + throw e; + } else { + throw EsqlLicenseChecker.invalidLicenseForCcsException(licenseState); + } + } + // check if it is a cross-cluster query + if (groupedIndices.size() > 1 || groupedIndices.containsKey(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY) == false) { + if (EsqlLicenseChecker.isCcsAllowed(licenseState) == false) { + throw EsqlLicenseChecker.invalidLicenseForCcsException(licenseState); + } + } + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 5330ddf95a752..b54baddc88b28 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -260,7 +260,7 @@ public final void test() throws Throwable { ); assumeFalse( "lookup join disabled for csv tests", - testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V4.capabilityName()) + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V5.capabilityName()) ); assumeFalse( "can't use TERM function in csv tests", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java index 4e89a09db9ed4..5e79e40b7e938 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.analysis; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; @@ -104,6 +105,11 @@ public static LogicalPlan analyze(String query, String mapping, QueryParams para return analyzer.analyze(plan); } + public static IndexResolution loadMapping(String resource, String indexName, IndexMode indexMode) { + EsIndex test = new EsIndex(indexName, EsqlTestUtils.loadMapping(resource), Map.of(indexName, indexMode)); + return IndexResolution.valid(test); + } + public static IndexResolution loadMapping(String resource, String indexName) { EsIndex test = new EsIndex(indexName, EsqlTestUtils.loadMapping(resource)); return IndexResolution.valid(test); @@ -118,7 +124,7 @@ public static IndexResolution expandedDefaultIndexResolution() { } public static IndexResolution defaultLookupResolution() { - return loadMapping("mapping-languages.json", "languages_lookup"); + return loadMapping("mapping-languages.json", "languages_lookup", IndexMode.LOOKUP); } public static EnrichResolution defaultEnrichResolution() { diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index dbe2c5f463f50..2f192936ba86c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -2146,7 +2146,7 @@ public void testLookupMatchTypeWrong() { } public void testLookupJoinUnknownIndex() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V4.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); String errorMessage = "Unknown index [foobar]"; IndexResolution missingLookupIndex = IndexResolution.invalid(errorMessage); @@ -2175,7 +2175,7 @@ public void testLookupJoinUnknownIndex() { } public void testLookupJoinUnknownField() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V4.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); String query = "FROM test | LOOKUP JOIN languages_lookup ON last_name"; String errorMessage = "1:45: Unknown column [last_name] in right side of join"; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 92cac30f1bb20..4b916106165fb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlCapabilities; -import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; @@ -22,7 +21,6 @@ import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.parser.QueryParam; import org.elasticsearch.xpack.esql.parser.QueryParams; -import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.ArrayList; import java.util.LinkedHashMap; @@ -1805,29 +1803,6 @@ public void testToDatePeriodToTimeDurationWithInvalidType() { ); } - public void testNonMetadataScore() { - assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); - assertEquals("1:12: `_score` is a reserved METADATA attribute", error("from foo | eval _score = 10")); - - assertEquals( - "1:48: `_score` is a reserved METADATA attribute", - error("from foo metadata _score | where qstr(\"bar\") | eval _score = _score + 1") - ); - } - - public void testScoreRenaming() { - assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); - assertEquals("1:33: `_score` is a reserved METADATA attribute", error("from foo METADATA _id, _score | rename _id as _score")); - - assertTrue(passes("from foo metadata _score | rename _score as foo").stream().anyMatch(a -> a.name().equals("foo"))); - } - - private List passes(String query) { - LogicalPlan logicalPlan = defaultAnalyzer.analyze(parser.createStatement(query)); - assertTrue(logicalPlan.resolved()); - return logicalPlan.output(); - } - public 
void testIntervalAsString() { // DateTrunc for (String interval : List.of("1 minu", "1 dy", "1.5 minutes", "0.5 days", "minutes 1", "day 5")) { @@ -1894,38 +1869,35 @@ public void testIntervalAsString() { ); } - public void testCategorizeSingleGrouping() { - assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V5.isEnabled()); - - query("from test | STATS COUNT(*) BY CATEGORIZE(first_name)"); - query("from test | STATS COUNT(*) BY cat = CATEGORIZE(first_name)"); + public void testCategorizeOnlyFirstGrouping() { + query("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name)"); + query("FROM test | STATS COUNT(*) BY cat = CATEGORIZE(first_name)"); + query("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no"); + query("FROM test | STATS COUNT(*) BY a = CATEGORIZE(first_name), b = emp_no"); assertEquals( - "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", - error("from test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no") + "1:39: CATEGORIZE grouping function [CATEGORIZE(first_name)] can only be in the first grouping expression", + error("FROM test | STATS COUNT(*) BY emp_no, CATEGORIZE(first_name)") ); assertEquals( - "1:39: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", - error("FROM test | STATS COUNT(*) BY emp_no, CATEGORIZE(first_name)") + "1:55: CATEGORIZE grouping function [CATEGORIZE(last_name)] can only be in the first grouping expression", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(last_name)") ); assertEquals( - "1:35: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", - error("FROM test | STATS COUNT(*) BY a = CATEGORIZE(first_name), b = emp_no") + "1:55: CATEGORIZE grouping function [CATEGORIZE(first_name)] can only be in the first grouping expression", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(first_name)") ); assertEquals( - "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings\n" - + "line 1:55: cannot use CATEGORIZE grouping function [CATEGORIZE(last_name)] with multiple groupings", - error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(last_name)") + "1:63: CATEGORIZE grouping function [CATEGORIZE(last_name)] can only be in the first grouping expression", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no, CATEGORIZE(last_name)") ); assertEquals( - "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", - error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(first_name)") + "1:63: CATEGORIZE grouping function [CATEGORIZE(first_name)] can only be in the first grouping expression", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no, CATEGORIZE(first_name)") ); } public void testCategorizeNestedGrouping() { - assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V5.isEnabled()); - query("from test | STATS COUNT(*) BY CATEGORIZE(LENGTH(first_name)::string)"); assertEquals( @@ -1939,8 +1911,6 @@ public void testCategorizeNestedGrouping() { } public void testCategorizeWithinAggregations() { - assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V5.isEnabled()); - query("from test | STATS MV_COUNT(cat), COUNT(*) BY cat = CATEGORIZE(first_name)"); query("from test | STATS MV_COUNT(CATEGORIZE(first_name)), COUNT(*) BY cat = CATEGORIZE(first_name)"); 
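// Editor's note -- the rule exercised by the CATEGORIZE tests above, in brief: CATEGORIZE
// may appear only in the first grouping expression, which relaxes the earlier blanket ban
// on combining it with multiple groupings:
//   FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no   // accepted
//   FROM test | STATS COUNT(*) BY emp_no, CATEGORIZE(first_name)   // rejected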
query("from test | STATS MV_COUNT(CATEGORIZE(first_name)), COUNT(*) BY CATEGORIZE(first_name)"); @@ -1968,6 +1938,24 @@ public void testCategorizeWithinAggregations() { ); } + public void testCategorizeWithFilteredAggregations() { + query("FROM test | STATS COUNT(*) WHERE first_name == \"John\" BY CATEGORIZE(last_name)"); + query("FROM test | STATS COUNT(*) WHERE last_name == \"Doe\" BY CATEGORIZE(last_name)"); + + assertEquals( + "1:34: can only use grouping function [CATEGORIZE(first_name)] as part of the BY clause", + error("FROM test | STATS COUNT(*) WHERE CATEGORIZE(first_name) == \"John\" BY CATEGORIZE(last_name)") + ); + assertEquals( + "1:34: can only use grouping function [CATEGORIZE(last_name)] as part of the BY clause", + error("FROM test | STATS COUNT(*) WHERE CATEGORIZE(last_name) == \"Doe\" BY CATEGORIZE(last_name)") + ); + assertEquals( + "1:34: cannot reference CATEGORIZE grouping function [category] within an aggregation filter", + error("FROM test | STATS COUNT(*) WHERE category == \"Doe\" BY category = CATEGORIZE(last_name)") + ); + } + public void testSortByAggregate() { assertEquals("1:18: Aggregate functions are not allowed in SORT [COUNT]", error("ROW a = 1 | SORT count(*)")); assertEquals("1:28: Aggregate functions are not allowed in SORT [COUNT]", error("ROW a = 1 | SORT to_string(count(*))")); @@ -1976,7 +1964,7 @@ public void testSortByAggregate() { } public void testLookupJoinDataTypeMismatch() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V4.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); query("FROM test | EVAL language_code = languages | LOOKUP JOIN languages_lookup ON language_code"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelopeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelopeTests.java new file mode 100644 index 0000000000000..ac87d45491447 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StEnvelopeTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_SHAPE; +import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_SHAPE; +import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; + +@FunctionName("st_envelope") +public class StEnvelopeTests extends AbstractScalarFunctionTestCase { + public StEnvelopeTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + String expectedGeo = "StEnvelopeFromWKBGeoEvaluator[field=Attribute[channel=0]]"; + String expectedCartesian = "StEnvelopeFromWKBEvaluator[field=Attribute[channel=0]]"; + final List suppliers = new ArrayList<>(); + TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedGeo, GEO_SHAPE, StEnvelopeTests::valueOfGeo, List.of()); + TestCaseSupplier.forUnaryCartesianPoint( + suppliers, + expectedCartesian, + CARTESIAN_SHAPE, + StEnvelopeTests::valueOfCartesian, + List.of() + ); + TestCaseSupplier.forUnaryGeoShape(suppliers, expectedGeo, GEO_SHAPE, StEnvelopeTests::valueOfGeo, List.of()); + TestCaseSupplier.forUnaryCartesianShape( + suppliers, + expectedCartesian, + CARTESIAN_SHAPE, + StEnvelopeTests::valueOfCartesian, + List.of() + ); + return parameterSuppliersFromTypedDataWithDefaultChecks( + false, + suppliers, + (v, p) -> "geo_point, cartesian_point, geo_shape or cartesian_shape" + ); + } + + private static BytesRef valueOfGeo(BytesRef wkb) { + return valueOf(wkb, true); + } + + private static BytesRef valueOfCartesian(BytesRef wkb) { + return valueOf(wkb, false); + } + + private static BytesRef valueOf(BytesRef wkb, boolean geo) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point) { + return wkb; + } + var envelope = geo ? SpatialEnvelopeVisitor.visitGeo(geometry, true) : SpatialEnvelopeVisitor.visitCartesian(geometry); + if (envelope.isPresent()) { + return UNSPECIFIED.asWkb(envelope.get()); + } + throw new IllegalArgumentException("Geometry is empty"); + } + + @Override + protected Expression build(Source source, List args) { + return new StEnvelope(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMaxTests.java new file mode 100644 index 0000000000000..dc6e61e44f599 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMaxTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; + +@FunctionName("st_xmax") +public class StXMaxTests extends AbstractScalarFunctionTestCase { + public StXMaxTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + String expectedGeo = "StXMaxFromWKBGeoEvaluator[field=Attribute[channel=0]]"; + String expectedCartesian = "StXMaxFromWKBEvaluator[field=Attribute[channel=0]]"; + final List suppliers = new ArrayList<>(); + TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedGeo, DOUBLE, StXMaxTests::valueOfGeo, List.of()); + TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedCartesian, DOUBLE, StXMaxTests::valueOfCartesian, List.of()); + TestCaseSupplier.forUnaryGeoShape(suppliers, expectedGeo, DOUBLE, StXMaxTests::valueOfGeo, List.of()); + TestCaseSupplier.forUnaryCartesianShape(suppliers, expectedCartesian, DOUBLE, StXMaxTests::valueOfCartesian, List.of()); + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + suppliers, + (v, p) -> "geo_point, cartesian_point, geo_shape or cartesian_shape" + ); + } + + private static double valueOfGeo(BytesRef wkb) { + return valueOf(wkb, true); + } + + private static double valueOfCartesian(BytesRef wkb) { + return valueOf(wkb, false); + } + + private static double valueOf(BytesRef wkb, boolean geo) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getX(); + } + var envelope = geo ? SpatialEnvelopeVisitor.visitGeo(geometry, true) : SpatialEnvelopeVisitor.visitCartesian(geometry); + if (envelope.isPresent()) { + return envelope.get().getMaxX(); + } + throw new IllegalArgumentException("Geometry is empty"); + } + + @Override + protected Expression build(Source source, List args) { + return new StXMax(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMinTests.java new file mode 100644 index 0000000000000..8c06d18b1e281 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXMinTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; + +@FunctionName("st_xmin") +public class StXMinTests extends AbstractScalarFunctionTestCase { + public StXMinTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + String expectedGeo = "StXMinFromWKBGeoEvaluator[field=Attribute[channel=0]]"; + String expectedCartesian = "StXMinFromWKBEvaluator[field=Attribute[channel=0]]"; + final List suppliers = new ArrayList<>(); + TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedGeo, DOUBLE, StXMinTests::valueOfGeo, List.of()); + TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedCartesian, DOUBLE, StXMinTests::valueOfCartesian, List.of()); + TestCaseSupplier.forUnaryGeoShape(suppliers, expectedGeo, DOUBLE, StXMinTests::valueOfGeo, List.of()); + TestCaseSupplier.forUnaryCartesianShape(suppliers, expectedCartesian, DOUBLE, StXMinTests::valueOfCartesian, List.of()); + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + suppliers, + (v, p) -> "geo_point, cartesian_point, geo_shape or cartesian_shape" + ); + } + + private static double valueOfGeo(BytesRef wkb) { + return valueOf(wkb, true); + } + + private static double valueOfCartesian(BytesRef wkb) { + return valueOf(wkb, false); + } + + private static double valueOf(BytesRef wkb, boolean geo) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getX(); + } + var envelope = geo ? SpatialEnvelopeVisitor.visitGeo(geometry, true) : SpatialEnvelopeVisitor.visitCartesian(geometry); + if (envelope.isPresent()) { + return envelope.get().getMinX(); + } + throw new IllegalArgumentException("Geometry is empty"); + } + + @Override + protected Expression build(Source source, List args) { + return new StXMin(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxTests.java new file mode 100644 index 0000000000000..7222d7517f7ff --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; + +@FunctionName("st_ymax") +public class StYMaxTests extends AbstractScalarFunctionTestCase { + public StYMaxTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + String expectedGeo = "StYMaxFromWKBGeoEvaluator[field=Attribute[channel=0]]"; + String expectedCartesian = "StYMaxFromWKBEvaluator[field=Attribute[channel=0]]"; + final List suppliers = new ArrayList<>(); + TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedGeo, DOUBLE, StYMaxTests::valueOfGeo, List.of()); + TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedCartesian, DOUBLE, StYMaxTests::valueOfCartesian, List.of()); + TestCaseSupplier.forUnaryGeoShape(suppliers, expectedGeo, DOUBLE, StYMaxTests::valueOfGeo, List.of()); + TestCaseSupplier.forUnaryCartesianShape(suppliers, expectedCartesian, DOUBLE, StYMaxTests::valueOfCartesian, List.of()); + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + suppliers, + (v, p) -> "geo_point, cartesian_point, geo_shape or cartesian_shape" + ); + } + + private static double valueOfGeo(BytesRef wkb) { + return valueOf(wkb, true); + } + + private static double valueOfCartesian(BytesRef wkb) { + return valueOf(wkb, false); + } + + private static double valueOf(BytesRef wkb, boolean geo) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getY(); + } + var envelope = geo ? SpatialEnvelopeVisitor.visitGeo(geometry, true) : SpatialEnvelopeVisitor.visitCartesian(geometry); + if (envelope.isPresent()) { + return envelope.get().getMaxY(); + } + throw new IllegalArgumentException("Geometry is empty"); + } + + @Override + protected Expression build(Source source, List args) { + return new StYMax(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMinTests.java new file mode 100644 index 0000000000000..843c7bb649114 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMinTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; + +@FunctionName("st_ymin") +public class StYMinTests extends AbstractScalarFunctionTestCase { + public StYMinTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + String expectedGeo = "StYMinFromWKBGeoEvaluator[field=Attribute[channel=0]]"; + String expectedCartesian = "StYMinFromWKBEvaluator[field=Attribute[channel=0]]"; + final List suppliers = new ArrayList<>(); + TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedGeo, DOUBLE, StYMinTests::valueOfGeo, List.of()); + TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedCartesian, DOUBLE, StYMinTests::valueOfCartesian, List.of()); + TestCaseSupplier.forUnaryGeoShape(suppliers, expectedGeo, DOUBLE, StYMinTests::valueOfGeo, List.of()); + TestCaseSupplier.forUnaryCartesianShape(suppliers, expectedCartesian, DOUBLE, StYMinTests::valueOfCartesian, List.of()); + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + suppliers, + (v, p) -> "geo_point, cartesian_point, geo_shape or cartesian_shape" + ); + } + + private static double valueOfGeo(BytesRef wkb) { + return valueOf(wkb, true); + } + + private static double valueOfCartesian(BytesRef wkb) { + return valueOf(wkb, false); + } + + private static double valueOf(BytesRef wkb, boolean geo) { + var geometry = UNSPECIFIED.wkbToGeometry(wkb); + if (geometry instanceof Point point) { + return point.getY(); + } + var envelope = geo ? 
SpatialEnvelopeVisitor.visitGeo(geometry, true) : SpatialEnvelopeVisitor.visitCartesian(geometry); + if (envelope.isPresent()) { + return envelope.get().getMinY(); + } + throw new IllegalArgumentException("Geometry is empty"); + } + + @Override + protected Expression build(Source source, List args) { + return new StYMin(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index c01668d0e6c48..2d3ba1be7a643 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -250,7 +250,7 @@ public void testCountFieldWithEval() { var esStatsQuery = as(exg.child(), EsStatsQueryExec.class); assertThat(esStatsQuery.limit(), is(nullValue())); - assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); + assertThat(Expressions.names(esStatsQuery.output()), contains("$$c$count", "$$c$seen")); var stat = as(esStatsQuery.stats().get(0), Stat.class); assertThat(stat.query(), is(QueryBuilders.existsQuery("salary"))); } @@ -271,7 +271,7 @@ public void testCountOneFieldWithFilter() { var exchange = as(agg.child(), ExchangeExec.class); var esStatsQuery = as(exchange.child(), EsStatsQueryExec.class); assertThat(esStatsQuery.limit(), is(nullValue())); - assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); + assertThat(Expressions.names(esStatsQuery.output()), contains("$$c$count", "$$c$seen")); var stat = as(esStatsQuery.stats().get(0), Stat.class); Source source = new Source(2, 8, "salary > 1000"); var exists = QueryBuilders.existsQuery("salary"); @@ -381,7 +381,7 @@ public void testAnotherCountAllWithFilter() { var exchange = as(agg.child(), ExchangeExec.class); var esStatsQuery = as(exchange.child(), EsStatsQueryExec.class); assertThat(esStatsQuery.limit(), is(nullValue())); - assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); + assertThat(Expressions.names(esStatsQuery.output()), contains("$$c$count", "$$c$seen")); var source = ((SingleValueQuery.Builder) esStatsQuery.query()).source(); var expected = wrapWithSingleQuery(query, QueryBuilders.rangeQuery("emp_no").gt(10010), "emp_no", source); assertThat(expected.toString(), is(esStatsQuery.query().toString())); @@ -992,7 +992,7 @@ public boolean exists(String field) { var exchange = as(agg.child(), ExchangeExec.class); assertThat(exchange.inBetweenAggs(), is(true)); var localSource = as(exchange.child(), LocalSourceExec.class); - assertThat(Expressions.names(localSource.output()), contains("count", "seen")); + assertThat(Expressions.names(localSource.output()), contains("$$c$count", "$$c$seen")); } /** @@ -1147,7 +1147,7 @@ public void testIsNotNull_TextField_Pushdown_WithCount() { var exg = as(agg.child(), ExchangeExec.class); var esStatsQuery = as(exg.child(), EsStatsQueryExec.class); assertThat(esStatsQuery.limit(), is(nullValue())); - assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); + assertThat(Expressions.names(esStatsQuery.output()), contains("$$c$count", "$$c$seen")); var stat = as(esStatsQuery.stats().get(0), Stat.class); assertThat(stat.query(), is(QueryBuilders.existsQuery("job"))); } diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 737bb2eb23a6f..f7c6a03d3614c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.optimizer; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.Build; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.lucene.BytesRefs; @@ -41,6 +42,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.util.Holder; @@ -113,7 +115,9 @@ import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; import org.elasticsearch.xpack.esql.plan.logical.join.Join; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; +import org.elasticsearch.xpack.esql.plan.logical.join.LookupJoin; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; @@ -139,6 +143,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.TWO; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; import static org.elasticsearch.xpack.esql.EsqlTestUtils.emptySource; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.fieldAttribute; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getFieldAttribute; import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; import static org.elasticsearch.xpack.esql.EsqlTestUtils.localSource; @@ -146,6 +151,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.analysis.Analyzer.NO_FIELDS; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.defaultLookupResolution; import static org.elasticsearch.xpack.esql.core.expression.Literal.NULL; import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; @@ -213,12 +219,23 @@ public static void init() { enrichResolution = new EnrichResolution(); AnalyzerTestUtils.loadEnrichPolicyResolution(enrichResolution, "languages_idx", "id", "languages_idx", "mapping-languages.json"); + var lookupMapping = loadMapping("mapping-languages.json"); + IndexResolution lookupResolution = IndexResolution.valid( + new EsIndex("language_code", lookupMapping, Map.of("language_code", IndexMode.LOOKUP)) + ); + // Most tests used data from the test index, so we load it here, and use it in the plan() function. 
mapping = loadMapping("mapping-basic.json"); EsIndex test = new EsIndex("test", mapping, Map.of("test", IndexMode.STANDARD)); IndexResolution getIndexResult = IndexResolution.valid(test); analyzer = new Analyzer( - new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), getIndexResult, enrichResolution), + new AnalyzerContext( + EsqlTestUtils.TEST_CFG, + new EsqlFunctionRegistry(), + getIndexResult, + defaultLookupResolution(), + enrichResolution + ), TEST_VERIFIER ); @@ -1212,8 +1229,6 @@ public void testCombineProjectionWithAggregationFirstAndAliasedGroupingUsedInAgg * \_EsRelation[test][_meta_field{f}#23, emp_no{f}#17, first_name{f}#18, ..] */ public void testCombineProjectionWithCategorizeGrouping() { - assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V5.isEnabled()); - var plan = plan(""" from test | eval k = first_name, k1 = k @@ -1294,6 +1309,26 @@ public void testCombineLimits() { ); } + public void testPushdownLimitsPastLeftJoin() { + var leftChild = emptySource(); + var rightChild = new LocalRelation(Source.EMPTY, List.of(fieldAttribute()), LocalSupplier.EMPTY); + assertNotEquals(leftChild, rightChild); + + var joinConfig = new JoinConfig(JoinTypes.LEFT, List.of(), List.of(), List.of()); + var join = switch (randomIntBetween(0, 2)) { + case 0 -> new Join(EMPTY, leftChild, rightChild, joinConfig); + case 1 -> new LookupJoin(EMPTY, leftChild, rightChild, joinConfig); + case 2 -> new InlineJoin(EMPTY, leftChild, rightChild, joinConfig); + default -> throw new IllegalArgumentException(); + }; + + var limit = new Limit(EMPTY, L(10), join); + + var optimizedPlan = new PushDownAndCombineLimits().rule(limit); + + assertEquals(join.replaceChildren(limit.replaceChild(join.left()), join.right()), optimizedPlan); + } + public void testMultipleCombineLimits() { var numberOfLimits = randomIntBetween(3, 10); var minimum = randomIntBetween(10, 99); @@ -3949,8 +3984,6 @@ public void testNestedExpressionsInGroups() { * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] 
*/ public void testNestedExpressionsInGroupsWithCategorize() { - assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V5.isEnabled()); - var plan = optimizedPlan(""" from test | stats c = count(salary) by CATEGORIZE(CONCAT(first_name, "abc")) @@ -4877,6 +4910,27 @@ public void testPlanSanityCheck() throws Exception { assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references [salary")); } + public void testPlanSanityCheckWithBinaryPlans() throws Exception { + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); + + var plan = optimizedPlan(""" + FROM test + | RENAME languages AS language_code + | LOOKUP JOIN languages_lookup ON language_code + """); + + var project = as(plan, Project.class); + var join = as(project.child(), Join.class); + + var joinWithInvalidLeftPlan = join.replaceChildren(join.right(), join.right()); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> logicalOptimizer.optimize(joinWithInvalidLeftPlan)); + assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references from left hand side [language_code")); + + var joinWithInvalidRightPlan = join.replaceChildren(join.left(), join.left()); + e = expectThrows(IllegalStateException.class, () -> logicalOptimizer.optimize(joinWithInvalidRightPlan)); + assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references from right hand side [language_code")); + } + // https://github.com/elastic/elasticsearch/issues/104995 public void testNoWrongIsNotNullPruning() { var plan = optimizedPlan(""" @@ -5695,7 +5749,7 @@ public void testLookupSimple() { String query = """ FROM test | RENAME languages AS int - | LOOKUP int_number_names ON int"""; + | LOOKUP_🐔 int_number_names ON int"""; if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> analyze(query)); assertThat(e.getMessage(), containsString("line 3:3: mismatched input 'LOOKUP' expecting {")); @@ -5775,7 +5829,7 @@ public void testLookupStats() { String query = """ FROM test | RENAME languages AS int - | LOOKUP int_number_names ON int + | LOOKUP_🐔 int_number_names ON int | STATS MIN(emp_no) BY name"""; if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> analyze(query)); @@ -5844,6 +5898,251 @@ public void testLookupStats() { ); } + // + // Lookup JOIN + // + + /** + * Filter on join keys should be pushed down + * Expects + * Project[[_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, gender{f}#9, hire_date{f}#14, job{f}#15, job.raw{f}#16, lang + * uage_code{r}#4, last_name{f}#11, long_noidx{f}#17, salary{f}#12, language_name{f}#19]] + * \_Join[LEFT,[language_code{r}#4],[language_code{r}#4],[language_code{f}#18]] + * |_EsqlProject[[_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, gender{f}#9, hire_date{f}#14, job{f}#15, job.raw{f}#16, lang + * uages{f}#10 AS language_code, last_name{f}#11, long_noidx{f}#17, salary{f}#12]] + * | \_Limit[1000[INTEGER]] + * | \_Filter[languages{f}#10 > 1[INTEGER]] + * | \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] 
+ * \_EsRelation[language_code][LOOKUP][language_code{f}#18, language_name{f}#19] + */ + public void testLookupJoinPushDownFilterOnJoinKeyWithRename() { + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); + + String query = """ + FROM test + | RENAME languages AS language_code + | LOOKUP JOIN language_code ON language_code + | WHERE language_code > 1 + """; + var plan = optimizedPlan(query); + + var project = as(plan, Project.class); + var join = as(project.child(), Join.class); + assertThat(join.config().type(), equalTo(JoinTypes.LEFT)); + project = as(join.left(), Project.class); + var limit = as(project.child(), Limit.class); + assertThat(limit.limit().fold(), equalTo(1000)); + var filter = as(limit.child(), Filter.class); + // assert that the rename has been undone + var op = as(filter.condition(), GreaterThan.class); + var field = as(op.left(), FieldAttribute.class); + assertThat(field.name(), equalTo("languages")); + + var literal = as(op.right(), Literal.class); + assertThat(literal.value(), equalTo(1)); + + var leftRel = as(filter.child(), EsRelation.class); + var rightRel = as(join.right(), EsRelation.class); + } + + /** + * Filter on left side fields (outside the join key) should be pushed down + * Expects + * Project[[_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, gender{f}#9, hire_date{f}#14, job{f}#15, job.raw{f}#16, lang + * uage_code{r}#4, last_name{f}#11, long_noidx{f}#17, salary{f}#12, language_name{f}#19]] + * \_Join[LEFT,[language_code{r}#4],[language_code{r}#4],[language_code{f}#18]] + * |_EsqlProject[[_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, gender{f}#9, hire_date{f}#14, job{f}#15, job.raw{f}#16, lang + * uages{f}#10 AS language_code, last_name{f}#11, long_noidx{f}#17, salary{f}#12]] + * | \_Limit[1000[INTEGER]] + * | \_Filter[emp_no{f}#7 > 1[INTEGER]] + * | \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] 
+ * \_EsRelation[language_code][LOOKUP][language_code{f}#18, language_name{f}#19] + */ + public void testLookupJoinPushDownFilterOnLeftSideField() { + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); + + String query = """ + FROM test + | RENAME languages AS language_code + | LOOKUP JOIN language_code ON language_code + | WHERE emp_no > 1 + """; + + var plan = optimizedPlan(query); + + var project = as(plan, Project.class); + var join = as(project.child(), Join.class); + assertThat(join.config().type(), equalTo(JoinTypes.LEFT)); + project = as(join.left(), Project.class); + + var limit = as(project.child(), Limit.class); + assertThat(limit.limit().fold(), equalTo(1000)); + var filter = as(limit.child(), Filter.class); + var op = as(filter.condition(), GreaterThan.class); + var field = as(op.left(), FieldAttribute.class); + assertThat(field.name(), equalTo("emp_no")); + + var literal = as(op.right(), Literal.class); + assertThat(literal.value(), equalTo(1)); + + var leftRel = as(filter.child(), EsRelation.class); + var rightRel = as(join.right(), EsRelation.class); + } + + /** + * A filter on right side fields cannot be pushed down + * Expects + * Project[[_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, gender{f}#9, hire_date{f}#14, job{f}#15, job.raw{f}#16, lang + * uage_code{r}#4, last_name{f}#11, long_noidx{f}#17, salary{f}#12, language_name{f}#19]] + * \_Limit[1000[INTEGER]] + * \_Filter[language_name{f}#19 == [45 6e 67 6c 69 73 68][KEYWORD]] + * \_Join[LEFT,[language_code{r}#4],[language_code{r}#4],[language_code{f}#18]] + * |_EsqlProject[[_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, gender{f}#9, hire_date{f}#14, job{f}#15, job.raw{f}#16, lang + * uages{f}#10 AS language_code, last_name{f}#11, long_noidx{f}#17, salary{f}#12]] + * | \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] + * \_EsRelation[language_code][LOOKUP][language_code{f}#18, language_name{f}#19] + */ + public void testLookupJoinPushDownDisabledForLookupField() { + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); + + String query = """ + FROM test + | RENAME languages AS language_code + | LOOKUP JOIN language_code ON language_code + | WHERE language_name == "English" + """; + + var plan = optimizedPlan(query); + + var project = as(plan, Project.class); + var limit = as(project.child(), Limit.class); + assertThat(limit.limit().fold(), equalTo(1000)); + + var filter = as(limit.child(), Filter.class); + var op = as(filter.condition(), Equals.class); + var field = as(op.left(), FieldAttribute.class); + assertThat(field.name(), equalTo("language_name")); + var literal = as(op.right(), Literal.class); + assertThat(literal.value(), equalTo(new BytesRef("English"))); + + var join = as(filter.child(), Join.class); + assertThat(join.config().type(), equalTo(JoinTypes.LEFT)); + project = as(join.left(), Project.class); + + var leftRel = as(project.child(), EsRelation.class); + var rightRel = as(join.right(), EsRelation.class); + } + + /** + * Split the conjunction into pushable and non-pushable filters. 
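+ * Only the emp_no predicate references left side fields alone, so the assertions below expect it to be pushed below the join, while the language_name predicate stays in place above it.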
+ * Expects + * Project[[_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, gender{f}#10, hire_date{f}#15, job{f}#16, job.raw{f}#17, lan + * guage_code{r}#4, last_name{f}#12, long_noidx{f}#18, salary{f}#13, language_name{f}#20]] + * \_Limit[1000[INTEGER]] + * \_Filter[language_name{f}#20 == [45 6e 67 6c 69 73 68][KEYWORD]] + * \_Join[LEFT,[language_code{r}#4],[language_code{r}#4],[language_code{f}#19]] + * |_EsqlProject[[_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, gender{f}#10, hire_date{f}#15, job{f}#16, job.raw{f}#17, lan + * guages{f}#11 AS language_code, last_name{f}#12, long_noidx{f}#18, salary{f}#13]] + * | \_Filter[emp_no{f}#8 > 1[INTEGER]] + * | \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] + * \_EsRelation[language_code][LOOKUP][language_code{f}#19, language_name{f}#20] + */ + public void testLookupJoinPushDownSeparatedForConjunctionBetweenLeftAndRightField() { + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); + + String query = """ + FROM test + | RENAME languages AS language_code + | LOOKUP JOIN language_code ON language_code + | WHERE language_name == "English" AND emp_no > 1 + """; + + var plan = optimizedPlan(query); + + var project = as(plan, Project.class); + var limit = as(project.child(), Limit.class); + assertThat(limit.limit().fold(), equalTo(1000)); + // filter kept in place, working on the right side + var filter = as(limit.child(), Filter.class); + EsqlBinaryComparison op = as(filter.condition(), Equals.class); + var field = as(op.left(), FieldAttribute.class); + assertThat(field.name(), equalTo("language_name")); + var literal = as(op.right(), Literal.class); + assertThat(literal.value(), equalTo(new BytesRef("English"))); + + var join = as(filter.child(), Join.class); + assertThat(join.config().type(), equalTo(JoinTypes.LEFT)); + project = as(join.left(), Project.class); + // filter pushed down + filter = as(project.child(), Filter.class); + op = as(filter.condition(), GreaterThan.class); + field = as(op.left(), FieldAttribute.class); + assertThat(field.name(), equalTo("emp_no")); + + literal = as(op.right(), Literal.class); + assertThat(literal.value(), equalTo(1)); + + var leftRel = as(filter.child(), EsRelation.class); + var rightRel = as(join.right(), EsRelation.class); + + } + + /** + * Disjunctions, however, keep the filter in place, even on pushable fields + * Expects + * Project[[_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, gender{f}#10, hire_date{f}#15, job{f}#16, job.raw{f}#17, lan + * guage_code{r}#4, last_name{f}#12, long_noidx{f}#18, salary{f}#13, language_name{f}#20]] + * \_Limit[1000[INTEGER]] + * \_Filter[language_name{f}#20 == [45 6e 67 6c 69 73 68][KEYWORD] OR emp_no{f}#8 > 1[INTEGER]] + * \_Join[LEFT,[language_code{r}#4],[language_code{r}#4],[language_code{f}#19]] + * |_EsqlProject[[_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, gender{f}#10, hire_date{f}#15, job{f}#16, job.raw{f}#17, lan + * guages{f}#11 AS language_code, last_name{f}#12, long_noidx{f}#18, salary{f}#13]] + * | \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] 
+ * \_EsRelation[language_code][LOOKUP][language_code{f}#19, language_name{f}#20] + */ + public void testLookupJoinPushDownDisabledForDisjunctionBetweenLeftAndRightField() { + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); + + String query = """ + FROM test + | RENAME languages AS language_code + | LOOKUP JOIN language_code ON language_code + | WHERE language_name == "English" OR emp_no > 1 + """; + + var plan = optimizedPlan(query); + + var project = as(plan, Project.class); + var limit = as(project.child(), Limit.class); + assertThat(limit.limit().fold(), equalTo(1000)); + + var filter = as(limit.child(), Filter.class); + var or = as(filter.condition(), Or.class); + EsqlBinaryComparison op = as(or.left(), Equals.class); + // OR left side + var field = as(op.left(), FieldAttribute.class); + assertThat(field.name(), equalTo("language_name")); + var literal = as(op.right(), Literal.class); + assertThat(literal.value(), equalTo(new BytesRef("English"))); + // OR right side + op = as(or.right(), GreaterThan.class); + field = as(op.left(), FieldAttribute.class); + assertThat(field.name(), equalTo("emp_no")); + literal = as(op.right(), Literal.class); + assertThat(literal.value(), equalTo(1)); + + var join = as(filter.child(), Join.class); + assertThat(join.config().type(), equalTo(JoinTypes.LEFT)); + project = as(join.left(), Project.class); + + var leftRel = as(project.child(), EsRelation.class); + var rightRel = as(join.right(), EsRelation.class); + } + + // + // + // + public void testTranslateMetricsWithoutGrouping() { assumeTrue("requires snapshot builds", Build.current().isSnapshot()); var query = "METRICS k8s max(rate(network.total_bytes_in))"; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 317aa3ab6f5e9..70d5e10d34a73 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.core.Tuple; import org.elasticsearch.geometry.Circle; import org.elasticsearch.geometry.Polygon; @@ -114,6 +115,7 @@ import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; import org.elasticsearch.xpack.esql.plan.physical.LimitExec; import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.plan.physical.ProjectExec; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; @@ -127,6 +129,7 @@ import org.elasticsearch.xpack.esql.stats.SearchStats; import org.junit.Before; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -153,6 +156,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.SerializationTestUtils.assertSerialization; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; +import static 
org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.defaultLookupResolution; import static org.elasticsearch.xpack.esql.core.expression.Expressions.name; import static org.elasticsearch.xpack.esql.core.expression.Expressions.names; import static org.elasticsearch.xpack.esql.core.expression.function.scalar.FunctionTestUtils.l; @@ -279,16 +283,30 @@ TestDataSource makeTestDataSource( String indexName, String mappingFileName, EsqlFunctionRegistry functionRegistry, + IndexResolution lookupResolution, EnrichResolution enrichResolution, SearchStats stats ) { Map mapping = loadMapping(mappingFileName); EsIndex index = new EsIndex(indexName, mapping, Map.of("test", IndexMode.STANDARD)); IndexResolution getIndexResult = IndexResolution.valid(index); - Analyzer analyzer = new Analyzer(new AnalyzerContext(config, functionRegistry, getIndexResult, enrichResolution), TEST_VERIFIER); + Analyzer analyzer = new Analyzer( + new AnalyzerContext(config, functionRegistry, getIndexResult, lookupResolution, enrichResolution), + TEST_VERIFIER + ); return new TestDataSource(mapping, index, analyzer, stats); } + TestDataSource makeTestDataSource( + String indexName, + String mappingFileName, + EsqlFunctionRegistry functionRegistry, + EnrichResolution enrichResolution, + SearchStats stats + ) { + return makeTestDataSource(indexName, mappingFileName, functionRegistry, defaultLookupResolution(), enrichResolution, stats); + } + TestDataSource makeTestDataSource( String indexName, String mappingFileName, @@ -2286,6 +2304,93 @@ public void testFieldExtractWithoutSourceAttributes() { ); } + public void testVerifierOnMissingReferences() { + var plan = physicalPlan(""" + from test + | stats s = sum(salary) by emp_no + | where emp_no > 10 + """); + + plan = plan.transformUp( + AggregateExec.class, + a -> new AggregateExec( + a.source(), + a.child(), + a.groupings(), + List.of(), // remove the aggs (and thus the groupings) entirely + a.getMode(), + a.intermediateAttributes(), + a.estimatedRowSize() + ) + ); + final var finalPlan = plan; + var e = expectThrows(IllegalStateException.class, () -> physicalPlanOptimizer.verify(finalPlan)); + assertThat(e.getMessage(), containsString(" > 10[INTEGER]]] optimized incorrectly due to missing references [emp_no{f}#")); + } + + public void testVerifierOnMissingReferencesWithBinaryPlans() throws Exception { + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V5.isEnabled()); + + // Do not assert serialization: + // This will have a LookupJoinExec, which is not serializable because it doesn't leave the coordinator. + var plan = physicalPlan(""" + FROM test + | RENAME languages AS language_code + | SORT language_code + | LOOKUP JOIN languages_lookup ON language_code + """, testData, false); + + var planWithInvalidJoinLeftSide = plan.transformUp(LookupJoinExec.class, join -> join.replaceChildren(join.right(), join.right())); + + var e = expectThrows(IllegalStateException.class, () -> physicalPlanOptimizer.verify(planWithInvalidJoinLeftSide)); + assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references from left hand side [language_code")); + + var planWithInvalidJoinRightSide = plan.transformUp( + LookupJoinExec.class, + // LookupJoinExec.rightReferences() is currently EMPTY (hack); use a HashJoinExec instead. 
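+ // HashJoinExec, unlike LookupJoinExec, does report its right-hand side references, which lets the verifier detect the invalid right child built below.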
+ join -> new HashJoinExec( + join.source(), + join.left(), + join.left(), + join.leftFields(), + join.leftFields(), + join.rightFields(), + join.output() + ) + ); + + e = expectThrows(IllegalStateException.class, () -> physicalPlanOptimizer.verify(planWithInvalidJoinRightSide)); + assertThat(e.getMessage(), containsString(" optimized incorrectly due to missing references from right hand side [language_code")); + } + + public void testVerifierOnDuplicateOutputAttributes() { + var plan = physicalPlan(""" + from test + | stats s = sum(salary) by emp_no + | where emp_no > 10 + """); + + plan = plan.transformUp(AggregateExec.class, a -> { + List intermediates = new ArrayList<>(a.intermediateAttributes()); + intermediates.add(intermediates.get(0)); + return new AggregateExec( + a.source(), + a.child(), + a.groupings(), + a.aggregates(), + AggregatorMode.INTERMEDIATE, // FINAL would deduplicate aggregates() + intermediates, + a.estimatedRowSize() + ); + }); + final var finalPlan = plan; + var e = expectThrows(IllegalStateException.class, () -> physicalPlanOptimizer.verify(finalPlan)); + assertThat( + e.getMessage(), + containsString("Plan [LimitExec[1000[INTEGER]]] optimized incorrectly due to duplicate output attribute emp_no{f}#") + ); + } + public void testProjectAwayColumns() { var rule = new ProjectAwayColumns(); @@ -2557,7 +2662,7 @@ public boolean exists(String field) { var exchange = asRemoteExchange(aggregate.child()); var localSourceExec = as(exchange.child(), LocalSourceExec.class); - assertThat(Expressions.names(localSourceExec.output()), contains("languages", "min", "seen")); + assertThat(Expressions.names(localSourceExec.output()), contains("languages", "$$m$min", "$$m$seen")); } /** @@ -2593,9 +2698,9 @@ public void testPartialAggFoldingOutput() { var limit = as(optimized, LimitExec.class); var agg = as(limit.child(), AggregateExec.class); var exchange = as(agg.child(), ExchangeExec.class); - assertThat(Expressions.names(exchange.output()), contains("count", "seen")); + assertThat(Expressions.names(exchange.output()), contains("$$c$count", "$$c$seen")); var source = as(exchange.child(), LocalSourceExec.class); - assertThat(Expressions.names(source.output()), contains("count", "seen")); + assertThat(Expressions.names(source.output()), contains("$$c$count", "$$c$seen")); } /** @@ -2627,7 +2732,7 @@ public void testGlobalAggFoldingOutput() { var aggFinal = as(limit.child(), AggregateExec.class); var aggPartial = as(aggFinal.child(), AggregateExec.class); // The partial aggregation's output is determined via AbstractPhysicalOperationProviders.intermediateAttributes() - assertThat(Expressions.names(aggPartial.output()), contains("count", "seen")); + assertThat(Expressions.names(aggPartial.output()), contains("$$c$count", "$$c$seen")); limit = as(aggPartial.child(), LimitExec.class); var exchange = as(limit.child(), ExchangeExec.class); var project = as(exchange.child(), ProjectExec.class); @@ -2665,9 +2770,15 @@ public void testPartialAggFoldingOutputForSyntheticAgg() { var aggFinal = as(limit.child(), AggregateExec.class); assertThat(aggFinal.output(), hasSize(2)); var exchange = as(aggFinal.child(), ExchangeExec.class); - assertThat(Expressions.names(exchange.output()), contains("sum", "seen", "count", "seen")); + assertThat( + Expressions.names(exchange.output()), + contains("$$SUM$a$0$sum", "$$SUM$a$0$seen", "$$COUNT$a$1$count", "$$COUNT$a$1$seen") + ); var source = as(exchange.child(), LocalSourceExec.class); - assertThat(Expressions.names(source.output()), contains("sum", "seen", 
"count", "seen")); + assertThat( + Expressions.names(source.output()), + contains("$$SUM$a$0$sum", "$$SUM$a$0$seen", "$$COUNT$a$1$count", "$$COUNT$a$1$seen") + ); } /** @@ -6706,11 +6817,17 @@ private PhysicalPlan physicalPlan(String query) { } private PhysicalPlan physicalPlan(String query, TestDataSource dataSource) { + return physicalPlan(query, dataSource, true); + } + + private PhysicalPlan physicalPlan(String query, TestDataSource dataSource, boolean assertSerialization) { var logical = logicalOptimizer.optimize(dataSource.analyzer.analyze(parser.createStatement(query))); // System.out.println("Logical\n" + logical); var physical = mapper.map(logical); // System.out.println(physical); - assertSerialization(physical); + if (assertSerialization) { + assertSerialization(physical); + } return physical; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ClusterRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ClusterRequestTests.java index 3dfc0f611eb2b..f2a619f0dbd89 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ClusterRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ClusterRequestTests.java @@ -65,7 +65,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { protected ClusterComputeRequest createTestInstance() { var sessionId = randomAlphaOfLength(10); String query = randomQuery(); - PhysicalPlan physicalPlan = DataNodeRequestTests.mapAndMaybeOptimize(parse(query)); + PhysicalPlan physicalPlan = DataNodeRequestSerializationTests.mapAndMaybeOptimize(parse(query)); OriginalIndices originalIndices = new OriginalIndices( generateRandomStringArray(10, 10, false, false), IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestSerializationTests.java new file mode 100644 index 0000000000000..d1ce064c35d81 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestSerializationTests.java @@ -0,0 +1,289 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.analysis.Analyzer; +import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.index.IndexResolution; +import org.elasticsearch.xpack.esql.optimizer.LogicalOptimizerContext; +import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; +import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerContext; +import org.elasticsearch.xpack.esql.optimizer.PhysicalPlanOptimizer; +import org.elasticsearch.xpack.esql.parser.EsqlParser; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.planner.mapper.Mapper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.esql.ConfigurationTestUtils.randomConfiguration; +import static org.elasticsearch.xpack.esql.ConfigurationTestUtils.randomTables; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_CFG; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.emptyPolicyResolution; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; + +public class DataNodeRequestSerializationTests extends AbstractWireSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return DataNodeRequest::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + List writeables = new ArrayList<>(); + writeables.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); + writeables.addAll(new EsqlPlugin().getNamedWriteables()); + return new NamedWriteableRegistry(writeables); + } + + @Override + protected DataNodeRequest createTestInstance() { + var sessionId = randomAlphaOfLength(10); + String query = randomFrom(""" + from test + | where round(emp_no) > 10 + | eval c = salary + | stats x = avg(c) + """, """ + from test + | sort last_name + | limit 10 + | where round(emp_no) > 10 + | eval c = first_name + | stats x = avg(salary) + """); + List shardIds = randomList(1, 10, () -> new ShardId("index-" + between(1, 10), "n/a", between(1, 10))); + PhysicalPlan physicalPlan = mapAndMaybeOptimize(parse(query)); + Map aliasFilters = Map.of( + new Index("concrete-index", "n/a"), + AliasFilter.of(new TermQueryBuilder("id", "1"), "alias-1") + ); + DataNodeRequest request = new DataNodeRequest( + sessionId, + randomConfiguration(query, randomTables()), + randomAlphaOfLength(10), + 
shardIds, + aliasFilters, + physicalPlan, + generateRandomStringArray(10, 10, false, false), + IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()) + ); + request.setParentTask(randomAlphaOfLength(10), randomNonNegativeLong()); + return request; + } + + @Override + protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException { + return switch (between(0, 8)) { + case 0 -> { + var request = new DataNodeRequest( + randomAlphaOfLength(20), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + in.aliasFilters(), + in.plan(), + in.indices(), + in.indicesOptions() + ); + request.setParentTask(in.getParentTask()); + yield request; + } + case 1 -> { + var request = new DataNodeRequest( + in.sessionId(), + randomConfiguration(), + in.clusterAlias(), + in.shardIds(), + in.aliasFilters(), + in.plan(), + in.indices(), + in.indicesOptions() + ); + request.setParentTask(in.getParentTask()); + yield request; + } + case 2 -> { + List shardIds = randomList(1, 10, () -> new ShardId("new-index-" + between(1, 10), "n/a", between(1, 10))); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + shardIds, + in.aliasFilters(), + in.plan(), + in.indices(), + in.indicesOptions() + ); + request.setParentTask(in.getParentTask()); + yield request; + } + case 3 -> { + String newQuery = randomFrom(""" + from test + | where round(emp_no) > 100 + | eval c = salary + | stats x = avg(c) + """, """ + from test + | sort last_name + | limit 10 + | where round(emp_no) > 100 + | eval c = first_name + | stats x = avg(salary) + """); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + in.aliasFilters(), + mapAndMaybeOptimize(parse(newQuery)), + in.indices(), + in.indicesOptions() + ); + request.setParentTask(in.getParentTask()); + yield request; + } + case 4 -> { + final Map aliasFilters; + if (randomBoolean()) { + aliasFilters = Map.of(); + } else { + aliasFilters = Map.of(new Index("concrete-index", "n/a"), AliasFilter.of(new TermQueryBuilder("id", "2"), "alias-2")); + } + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + aliasFilters, + in.plan(), + in.indices(), + in.indicesOptions() + ); + request.setParentTask(request.getParentTask()); + yield request; + } + case 5 -> { + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + in.aliasFilters(), + in.plan(), + in.indices(), + in.indicesOptions() + ); + request.setParentTask( + randomValueOtherThan(request.getParentTask().getNodeId(), () -> randomAlphaOfLength(10)), + randomNonNegativeLong() + ); + yield request; + } + case 6 -> { + var clusterAlias = randomValueOtherThan(in.clusterAlias(), () -> randomAlphaOfLength(10)); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + clusterAlias, + in.shardIds(), + in.aliasFilters(), + in.plan(), + in.indices(), + in.indicesOptions() + ); + request.setParentTask(request.getParentTask()); + yield request; + } + case 7 -> { + var indices = randomValueOtherThan(in.indices(), () -> generateRandomStringArray(10, 10, false, false)); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + in.aliasFilters(), + in.plan(), + indices, + in.indicesOptions() + ); + request.setParentTask(request.getParentTask()); + yield request; + } + case 8 -> { + var 
+                var indicesOptions = randomValueOtherThan(
+                    in.indicesOptions(),
+                    () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())
+                );
+                var request = new DataNodeRequest(
+                    in.sessionId(),
+                    in.configuration(),
+                    in.clusterAlias(),
+                    in.shardIds(),
+                    in.aliasFilters(),
+                    in.plan(),
+                    in.indices(),
+                    indicesOptions
+                );
+                request.setParentTask(request.getParentTask());
+                yield request;
+            }
+            default -> throw new AssertionError("invalid value");
+        };
+    }
+
+    static LogicalPlan parse(String query) {
+        Map<String, EsField> mapping = loadMapping("mapping-basic.json");
+        EsIndex test = new EsIndex("test", mapping, Map.of("test", IndexMode.STANDARD));
+        IndexResolution getIndexResult = IndexResolution.valid(test);
+        var logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(TEST_CFG));
+        var analyzer = new Analyzer(
+            new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), getIndexResult, emptyPolicyResolution()),
+            TEST_VERIFIER
+        );
+        return logicalOptimizer.optimize(analyzer.analyze(new EsqlParser().createStatement(query)));
+    }
+
+    static PhysicalPlan mapAndMaybeOptimize(LogicalPlan logicalPlan) {
+        var physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(TEST_CFG));
+        var mapper = new Mapper();
+        var physical = mapper.map(logicalPlan);
+        if (randomBoolean()) {
+            physical = physicalPlanOptimizer.optimize(physical);
+        }
+        return physical;
+    }
+
+    @Override
+    protected List<String> filteredWarnings() {
+        return withDefaultLimitWarning(super.filteredWarnings());
+    }
+}
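For context on what the new serialization test actually exercises: AbstractWireSerializingTestCase round-trips every instance from createTestInstance(), plus each mutation from mutateInstance(), over the transport wire using the NamedWriteableRegistry supplied above. A minimal sketch of that round trip, with an illustrative class and helper name rather than the framework's exact code:

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

final class WireRoundTripSketch {
    // Serialize the instance, then deserialize it through the same registry the
    // test provides; the copy must equal the original, while each mutated
    // instance must compare unequal, or the corresponding field is not wired
    // through (de)serialization correctly.
    static <T extends Writeable> T copy(T original, Writeable.Reader<T> reader, NamedWriteableRegistry registry) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                return reader.read(in);
            }
        }
    }
}

That is also why mutateInstance() tweaks exactly one constructor argument per case: each case pins down one field of the wire format.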
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java
index 4553551c40cd3..d0c5ddd0dc927 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java
@@ -8,282 +8,48 @@ package org.elasticsearch.xpack.esql.plugin;
 
 import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexMode;
-import org.elasticsearch.index.query.TermQueryBuilder;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.search.SearchModule;
-import org.elasticsearch.search.internal.AliasFilter;
-import org.elasticsearch.test.AbstractWireSerializingTestCase;
-import org.elasticsearch.xpack.esql.EsqlTestUtils;
-import org.elasticsearch.xpack.esql.analysis.Analyzer;
-import org.elasticsearch.xpack.esql.analysis.AnalyzerContext;
-import org.elasticsearch.xpack.esql.core.type.EsField;
-import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry;
-import org.elasticsearch.xpack.esql.index.EsIndex;
-import org.elasticsearch.xpack.esql.index.IndexResolution;
-import org.elasticsearch.xpack.esql.optimizer.LogicalOptimizerContext;
-import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer;
-import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerContext;
-import org.elasticsearch.xpack.esql.optimizer.PhysicalPlanOptimizer;
-import org.elasticsearch.xpack.esql.parser.EsqlParser;
-import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan;
-import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan;
-import org.elasticsearch.xpack.esql.planner.mapper.Mapper;
+import org.elasticsearch.test.ESTestCase;
 
-import java.io.IOException;
-import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 
+import static org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField.NO_INDEX_PLACEHOLDER;
 import static org.elasticsearch.xpack.esql.ConfigurationTestUtils.randomConfiguration;
 import static org.elasticsearch.xpack.esql.ConfigurationTestUtils.randomTables;
-import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_CFG;
-import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER;
-import static org.elasticsearch.xpack.esql.EsqlTestUtils.emptyPolicyResolution;
-import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping;
-import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning;
+import static org.hamcrest.Matchers.empty;
+import static org.hamcrest.Matchers.equalTo;
 
-public class DataNodeRequestTests extends AbstractWireSerializingTestCase<DataNodeRequest> {
+public class DataNodeRequestTests extends ESTestCase {
 
-    @Override
-    protected Writeable.Reader<DataNodeRequest> instanceReader() {
-        return DataNodeRequest::new;
-    }
-
-    @Override
-    protected NamedWriteableRegistry getNamedWriteableRegistry() {
-        List<NamedWriteableRegistry.Entry> writeables = new ArrayList<>();
-        writeables.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables());
-        writeables.addAll(new EsqlPlugin().getNamedWriteables());
-        return new NamedWriteableRegistry(writeables);
-    }
-
-    @Override
-    protected DataNodeRequest createTestInstance() {
+    public void testNoIndexPlaceholder() {
         var sessionId = randomAlphaOfLength(10);
-        String query = randomFrom("""
-            from test
-            | where round(emp_no) > 10
-            | eval c = salary
-            | stats x = avg(c)
-            """, """
-            from test
-            | sort last_name
-            | limit 10
-            | where round(emp_no) > 10
-            | eval c = first_name
-            | stats x = avg(salary)
-            """);
         List<ShardId> shardIds = randomList(1, 10, () -> new ShardId("index-" + between(1, 10), "n/a", between(1, 10)));
-        PhysicalPlan physicalPlan = mapAndMaybeOptimize(parse(query));
-        Map<Index, AliasFilter> aliasFilters = Map.of(
-            new Index("concrete-index", "n/a"),
-            AliasFilter.of(new TermQueryBuilder("id", "1"), "alias-1")
-        );
+
         DataNodeRequest request = new DataNodeRequest(
             sessionId,
-            randomConfiguration(query, randomTables()),
+            randomConfiguration("""
+                from test
+                | where round(emp_no) > 10
+                | eval c = salary
+                | stats x = avg(c)
+                """, randomTables()),
             randomAlphaOfLength(10),
             shardIds,
-            aliasFilters,
-            physicalPlan,
+            Collections.emptyMap(),
+            null,
            generateRandomStringArray(10, 10, false, false),
             IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())
         );
-        request.setParentTask(randomAlphaOfLength(10), randomNonNegativeLong());
-        return request;
-    }
 
-    @Override
-    protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException {
-        return switch (between(0, 8)) {
-            case 0 -> {
-                var request = new DataNodeRequest(
-                    randomAlphaOfLength(20),
-                    in.configuration(),
-                    in.clusterAlias(),
-                    in.shardIds(),
-                    in.aliasFilters(),
-                    in.plan(),
-                    in.indices(),
-                    in.indicesOptions()
-                );
-                request.setParentTask(in.getParentTask());
-                yield request;
-            }
-            case 1 -> {
-                var request = new DataNodeRequest(
-                    in.sessionId(),
-                    randomConfiguration(),
-                    in.clusterAlias(),
-                    in.shardIds(),
-                    in.aliasFilters(),
-                    in.plan(),
-                    in.indices(),
-                    in.indicesOptions()
-                );
-                request.setParentTask(in.getParentTask());
-                yield request;
-            }
-            case 2 -> {
-                List<ShardId> shardIds = randomList(1, 10, () -> new
ShardId("new-index-" + between(1, 10), "n/a", between(1, 10))); - var request = new DataNodeRequest( - in.sessionId(), - in.configuration(), - in.clusterAlias(), - shardIds, - in.aliasFilters(), - in.plan(), - in.indices(), - in.indicesOptions() - ); - request.setParentTask(in.getParentTask()); - yield request; - } - case 3 -> { - String newQuery = randomFrom(""" - from test - | where round(emp_no) > 100 - | eval c = salary - | stats x = avg(c) - """, """ - from test - | sort last_name - | limit 10 - | where round(emp_no) > 100 - | eval c = first_name - | stats x = avg(salary) - """); - var request = new DataNodeRequest( - in.sessionId(), - in.configuration(), - in.clusterAlias(), - in.shardIds(), - in.aliasFilters(), - mapAndMaybeOptimize(parse(newQuery)), - in.indices(), - in.indicesOptions() - ); - request.setParentTask(in.getParentTask()); - yield request; - } - case 4 -> { - final Map aliasFilters; - if (randomBoolean()) { - aliasFilters = Map.of(); - } else { - aliasFilters = Map.of(new Index("concrete-index", "n/a"), AliasFilter.of(new TermQueryBuilder("id", "2"), "alias-2")); - } - var request = new DataNodeRequest( - in.sessionId(), - in.configuration(), - in.clusterAlias(), - in.shardIds(), - aliasFilters, - in.plan(), - in.indices(), - in.indicesOptions() - ); - request.setParentTask(request.getParentTask()); - yield request; - } - case 5 -> { - var request = new DataNodeRequest( - in.sessionId(), - in.configuration(), - in.clusterAlias(), - in.shardIds(), - in.aliasFilters(), - in.plan(), - in.indices(), - in.indicesOptions() - ); - request.setParentTask( - randomValueOtherThan(request.getParentTask().getNodeId(), () -> randomAlphaOfLength(10)), - randomNonNegativeLong() - ); - yield request; - } - case 6 -> { - var clusterAlias = randomValueOtherThan(in.clusterAlias(), () -> randomAlphaOfLength(10)); - var request = new DataNodeRequest( - in.sessionId(), - in.configuration(), - clusterAlias, - in.shardIds(), - in.aliasFilters(), - in.plan(), - in.indices(), - in.indicesOptions() - ); - request.setParentTask(request.getParentTask()); - yield request; - } - case 7 -> { - var indices = randomValueOtherThan(in.indices(), () -> generateRandomStringArray(10, 10, false, false)); - var request = new DataNodeRequest( - in.sessionId(), - in.configuration(), - in.clusterAlias(), - in.shardIds(), - in.aliasFilters(), - in.plan(), - indices, - in.indicesOptions() - ); - request.setParentTask(request.getParentTask()); - yield request; - } - case 8 -> { - var indicesOptions = randomValueOtherThan( - in.indicesOptions(), - () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()) - ); - var request = new DataNodeRequest( - in.sessionId(), - in.configuration(), - in.clusterAlias(), - in.shardIds(), - in.aliasFilters(), - in.plan(), - in.indices(), - indicesOptions - ); - request.setParentTask(request.getParentTask()); - yield request; - } - default -> throw new AssertionError("invalid value"); - }; - } + assertThat(request.shardIds(), equalTo(shardIds)); - static LogicalPlan parse(String query) { - Map mapping = loadMapping("mapping-basic.json"); - EsIndex test = new EsIndex("test", mapping, Map.of("test", IndexMode.STANDARD)); - IndexResolution getIndexResult = IndexResolution.valid(test); - var logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(TEST_CFG)); - var analyzer = new Analyzer( - new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), getIndexResult, emptyPolicyResolution()), - TEST_VERIFIER - ); - 
return logicalOptimizer.optimize(analyzer.analyze(new EsqlParser().createStatement(query))); - } + request.indices(generateRandomStringArray(10, 10, false, false)); - static PhysicalPlan mapAndMaybeOptimize(LogicalPlan logicalPlan) { - var physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(TEST_CFG)); - var mapper = new Mapper(); - var physical = mapper.map(logicalPlan); - if (randomBoolean()) { - physical = physicalPlanOptimizer.optimize(physical); - } - return physical; - } + assertThat(request.shardIds(), equalTo(shardIds)); + + request.indices(NO_INDEX_PLACEHOLDER); - @Override - protected List filteredWarnings() { - return withDefaultLimitWarning(super.filteredWarnings()); + assertThat(request.shardIds(), empty()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java index 60b632c443f8e..1000c05282fdb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java @@ -8,10 +8,18 @@ package org.elasticsearch.xpack.esql.session; import org.apache.lucene.index.CorruptIndexException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.indices.IndicesExpressionGrouper; +import org.elasticsearch.license.License; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.internal.XPackLicenseStatus; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.NoSeedNodeLeftException; @@ -20,9 +28,11 @@ import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; +import org.elasticsearch.xpack.esql.analysis.TableInfo; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.index.IndexResolution; +import org.elasticsearch.xpack.esql.plan.TableIdentifier; import org.elasticsearch.xpack.esql.type.EsFieldTests; import java.util.ArrayList; @@ -32,8 +42,12 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.LongSupplier; import java.util.function.Predicate; +import java.util.stream.Collectors; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; +import static org.elasticsearch.xpack.esql.session.EsqlSessionCCSUtils.checkForCcsLicense; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -627,4 +641,148 @@ public void testMissingIndicesIsFatal() { } } + + public void testCheckForCcsLicense() { + final TestIndicesExpressionGrouper indicesGrouper = new TestIndicesExpressionGrouper(); + + // this seems to be used only for tracking usage of features, not for checking if a license is expired + final 
LongSupplier currTime = () -> System.currentTimeMillis();
+
+        XPackLicenseState enterpriseLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.ENTERPRISE));
+        XPackLicenseState trialLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.TRIAL));
+        XPackLicenseState platinumLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.PLATINUM));
+        XPackLicenseState goldLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.GOLD));
+        XPackLicenseState basicLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.BASIC));
+        XPackLicenseState standardLicenseValid = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.STANDARD));
+        XPackLicenseState missingLicense = new XPackLicenseState(currTime, activeLicenseStatus(License.OperationMode.MISSING));
+        XPackLicenseState nullLicense = null;
+
+        final XPackLicenseStatus enterpriseStatus = inactiveLicenseStatus(License.OperationMode.ENTERPRISE);
+        XPackLicenseState enterpriseLicenseInactive = new XPackLicenseState(currTime, enterpriseStatus);
+        XPackLicenseState trialLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.TRIAL));
+        XPackLicenseState platinumLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.PLATINUM));
+        XPackLicenseState goldLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.GOLD));
+        XPackLicenseState basicLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.BASIC));
+        XPackLicenseState standardLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.STANDARD));
+        XPackLicenseState missingLicenseInactive = new XPackLicenseState(currTime, inactiveLicenseStatus(License.OperationMode.MISSING));
+
+        // local only search does not require an enterprise license
+        {
+            List<TableInfo> indices = new ArrayList<>();
+            indices.add(new TableInfo(new TableIdentifier(EMPTY, null, randomFrom("idx", "idx1,idx2*"))));
+
+            checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseValid);
+            checkForCcsLicense(indices, indicesGrouper, platinumLicenseValid);
+            checkForCcsLicense(indices, indicesGrouper, goldLicenseValid);
+            checkForCcsLicense(indices, indicesGrouper, trialLicenseValid);
+            checkForCcsLicense(indices, indicesGrouper, basicLicenseValid);
+            checkForCcsLicense(indices, indicesGrouper, standardLicenseValid);
+            checkForCcsLicense(indices, indicesGrouper, missingLicense);
+            checkForCcsLicense(indices, indicesGrouper, nullLicense);
+
+            checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseInactive);
+            checkForCcsLicense(indices, indicesGrouper, platinumLicenseInactive);
+            checkForCcsLicense(indices, indicesGrouper, goldLicenseInactive);
+            checkForCcsLicense(indices, indicesGrouper, trialLicenseInactive);
+            checkForCcsLicense(indices, indicesGrouper, basicLicenseInactive);
+            checkForCcsLicense(indices, indicesGrouper, standardLicenseInactive);
+            checkForCcsLicense(indices, indicesGrouper, missingLicenseInactive);
+        }
+
+        // cross-cluster search requires a valid (active, non-expired) enterprise license OR a valid trial license
+        {
+            List<TableInfo> indices = new ArrayList<>();
+            final String indexExprWithRemotes = randomFrom("remote:idx", "idx1,remote:idx2*,remote:logs,c*:idx4");
+            if (randomBoolean()) {
+                indices.add(new TableInfo(new TableIdentifier(EMPTY, null, indexExprWithRemotes)));
+            } else {
+                indices.add(new TableInfo(new TableIdentifier(EMPTY, null, randomFrom("idx", "idx1,idx2*"))));
+                indices.add(new TableInfo(new TableIdentifier(EMPTY, null, indexExprWithRemotes)));
+            }
+
+            // licenses that work
+            checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseValid);
+            checkForCcsLicense(indices, indicesGrouper, trialLicenseValid);
+
+            // all others fail ---
+
+            // active non-expired non-Enterprise non-Trial licenses
+            assertLicenseCheckFails(indices, indicesGrouper, platinumLicenseValid, "active platinum license");
+            assertLicenseCheckFails(indices, indicesGrouper, goldLicenseValid, "active gold license");
+            assertLicenseCheckFails(indices, indicesGrouper, basicLicenseValid, "active basic license");
+            assertLicenseCheckFails(indices, indicesGrouper, standardLicenseValid, "active standard license");
+            assertLicenseCheckFails(indices, indicesGrouper, missingLicense, "active missing license");
+            assertLicenseCheckFails(indices, indicesGrouper, nullLicense, "none");
+
+            // inactive/expired licenses
+            assertLicenseCheckFails(indices, indicesGrouper, enterpriseLicenseInactive, "expired enterprise license");
+            assertLicenseCheckFails(indices, indicesGrouper, trialLicenseInactive, "expired trial license");
+            assertLicenseCheckFails(indices, indicesGrouper, platinumLicenseInactive, "expired platinum license");
+            assertLicenseCheckFails(indices, indicesGrouper, goldLicenseInactive, "expired gold license");
+            assertLicenseCheckFails(indices, indicesGrouper, basicLicenseInactive, "expired basic license");
+            assertLicenseCheckFails(indices, indicesGrouper, standardLicenseInactive, "expired standard license");
+            assertLicenseCheckFails(indices, indicesGrouper, missingLicenseInactive, "expired missing license");
+        }
+    }
+
+    private XPackLicenseStatus activeLicenseStatus(License.OperationMode operationMode) {
+        return new XPackLicenseStatus(operationMode, true, null);
+    }
+
+    private XPackLicenseStatus inactiveLicenseStatus(License.OperationMode operationMode) {
+        return new XPackLicenseStatus(operationMode, false, "License Expired 123");
+    }
+
+    private void assertLicenseCheckFails(
+        List<TableInfo> indices,
+        TestIndicesExpressionGrouper indicesGrouper,
+        XPackLicenseState licenseState,
+        String expectedErrorMessageSuffix
+    ) {
+        ElasticsearchStatusException e = expectThrows(
+            ElasticsearchStatusException.class,
+            () -> checkForCcsLicense(indices, indicesGrouper, licenseState)
+        );
+        assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+        assertThat(
+            e.getMessage(),
+            equalTo(
+                "A valid Enterprise license is required to run ES|QL cross-cluster searches. License found: " + expectedErrorMessageSuffix
+            )
+        );
+    }
+
+    static class TestIndicesExpressionGrouper implements IndicesExpressionGrouper {
+        @Override
+        public Map<String, OriginalIndices> groupIndices(IndicesOptions indicesOptions, String[] indexExpressions) {
+            final Map<String, OriginalIndices> originalIndicesMap = new HashMap<>();
+            final String localKey = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY;
+
+            for (String expr : indexExpressions) {
+                assertFalse(Strings.isNullOrBlank(expr));
+                String[] split = expr.split(":", 2);
+                assertTrue("Bad index expression: " + expr, split.length < 3);
+                String clusterAlias;
+                String indexExpr;
+                if (split.length == 1) {
+                    clusterAlias = localKey;
+                    indexExpr = expr;
+                } else {
+                    clusterAlias = split[0];
+                    indexExpr = split[1];
+                }
+                OriginalIndices currIndices = originalIndicesMap.get(clusterAlias);
+                if (currIndices == null) {
+                    originalIndicesMap.put(clusterAlias, new OriginalIndices(new String[] { indexExpr }, indicesOptions));
+                } else {
+                    List<String> indicesList = Arrays.stream(currIndices.indices()).collect(Collectors.toList());
+                    indicesList.add(indexExpr);
+                    originalIndicesMap.put(clusterAlias, new OriginalIndices(indicesList.toArray(new String[0]), indicesOptions));
+                }
+            }
+            return originalIndicesMap;
+        }
+    }
+}
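The rule these cases pin down: expressions are grouped per cluster alias, local-only groups are never license-gated, and any group targeting a remote cluster requires an active ENTERPRISE or TRIAL license. A minimal sketch of that gate under simplified assumptions (the method shape and mode handling are hypothetical; only the status code and message mirror the assertions above):

import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.license.License;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.rest.RestStatus;

final class CcsLicenseGateSketch {
    // Hypothetical reduction of checkForCcsLicense: reject only when remote
    // clusters are involved and the license is not an active enterprise/trial.
    static void requireCcsLicense(boolean hasRemoteIndices, XPackLicenseState licenseState, String licenseDescription) {
        if (hasRemoteIndices == false) {
            return; // local-only searches pass for every license, even null or expired ones
        }
        boolean allowed = licenseState != null
            && licenseState.isActive()
            && (licenseState.getOperationMode() == License.OperationMode.ENTERPRISE
                || licenseState.getOperationMode() == License.OperationMode.TRIAL);
        if (allowed == false) {
            throw new ElasticsearchStatusException(
                "A valid Enterprise license is required to run ES|QL cross-cluster searches. License found: " + licenseDescription,
                RestStatus.BAD_REQUEST
            );
        }
    }
}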
License found: " + expectedErrorMessageSuffix + ) + ); + } + + static class TestIndicesExpressionGrouper implements IndicesExpressionGrouper { + @Override + public Map groupIndices(IndicesOptions indicesOptions, String[] indexExpressions) { + final Map originalIndicesMap = new HashMap<>(); + final String localKey = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + + for (String expr : indexExpressions) { + assertFalse(Strings.isNullOrBlank(expr)); + String[] split = expr.split(":", 2); + assertTrue("Bad index expression: " + expr, split.length < 3); + String clusterAlias; + String indexExpr; + if (split.length == 1) { + clusterAlias = localKey; + indexExpr = expr; + } else { + clusterAlias = split[0]; + indexExpr = split[1]; + + } + OriginalIndices currIndices = originalIndicesMap.get(clusterAlias); + if (currIndices == null) { + originalIndicesMap.put(clusterAlias, new OriginalIndices(new String[] { indexExpr }, indicesOptions)); + } else { + List indicesList = Arrays.stream(currIndices.indices()).collect(Collectors.toList()); + indicesList.add(indexExpr); + originalIndicesMap.put(clusterAlias, new OriginalIndices(indicesList.toArray(new String[0]), indicesOptions)); + } + } + return originalIndicesMap; + } + } + } diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java index 8ba88865e361a..89d80cf34aec5 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java @@ -46,7 +46,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.SearchContextMissingException; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -469,27 +468,25 @@ public void testCanMatch() throws IOException { ).canMatch() ); - expectThrows(SearchContextMissingException.class, () -> { - ShardSearchContextId withoutCommitId = new ShardSearchContextId(contextId.getSessionId(), contextId.getId(), null); - sourceBuilder.query(QueryBuilders.rangeQuery("field").gt("2010-01-06T02:00").lt("2010-01-07T02:00")); - assertFalse( - searchService.canMatch( - new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - shard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1f, - -1, - null, - withoutCommitId, - null - ) - ).canMatch() - ); - }); + ShardSearchContextId withoutCommitId = new ShardSearchContextId(contextId.getSessionId(), contextId.getId(), null); + sourceBuilder.query(QueryBuilders.rangeQuery("field").gt("2010-01-06T02:00").lt("2010-01-07T02:00")); + assertTrue( + searchService.canMatch( + new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + shard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1f, + -1, + null, + withoutCommitId, + null + ) + ).canMatch() + ); } } diff --git a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java index 5168cd11eb172..a5d966873dda1 100644 --- 
a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ilm/CCRIndexLifecycleIT.java @@ -38,7 +38,6 @@ import java.util.Optional; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.ilm.ShrinkIndexNameSupplier.SHRUNKEN_INDEX_PREFIX; import static org.hamcrest.Matchers.equalTo; @@ -762,8 +761,8 @@ private void assertDocumentExists(RestClient client, String index, String id) th } private void createNewSingletonPolicy(String policyName, String phaseName, LifecycleAction action, TimeValue after) throws IOException { - Phase phase = new Phase(phaseName, after, singletonMap(action.getWriteableName(), action)); - LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, singletonMap(phase.getName(), phase)); + Phase phase = new Phase(phaseName, after, Map.of(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, Map.of(phase.getName(), phase)); XContentBuilder builder = jsonBuilder(); lifecyclePolicy.toXContent(builder, null); final StringEntity entity = new StringEntity("{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/MigrateToDataTiersIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/MigrateToDataTiersIT.java index 60e71b095039e..811d07a436677 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/MigrateToDataTiersIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/MigrateToDataTiersIT.java @@ -46,7 +46,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createIndexWithSettings; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createNewSingletonPolicy; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createPolicy; @@ -101,11 +100,11 @@ public void testMigrateToDataTiersAction() throws Exception { Map warmActions = new HashMap<>(); warmActions.put(SetPriorityAction.NAME, new SetPriorityAction(50)); warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1, null)); - warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, singletonMap("data", "warm"), null, null)); + warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, Map.of("data", "warm"), null, null)); warmActions.put(ShrinkAction.NAME, new ShrinkAction(1, null, false)); Map coldActions = new HashMap<>(); coldActions.put(SetPriorityAction.NAME, new SetPriorityAction(0)); - coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null, singletonMap("data", "cold"))); + coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null, Map.of("data", "cold"))); createPolicy( client(), @@ -114,7 +113,7 @@ public void testMigrateToDataTiersAction() throws Exception { new Phase("warm", TimeValue.ZERO, warmActions), new Phase("cold", TimeValue.timeValueDays(100), coldActions), null, - new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE)) + new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE)) ); 
createIndexWithSettings( @@ -377,11 +376,11 @@ public void testMigrationDryRun() throws Exception { Map warmActions = new HashMap<>(); warmActions.put(SetPriorityAction.NAME, new SetPriorityAction(50)); warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1, null)); - warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, singletonMap("data", "warm"), null, null)); + warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, Map.of("data", "warm"), null, null)); warmActions.put(ShrinkAction.NAME, new ShrinkAction(1, null, false)); Map coldActions = new HashMap<>(); coldActions.put(SetPriorityAction.NAME, new SetPriorityAction(0)); - coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null, singletonMap("data", "cold"))); + coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null, Map.of("data", "cold"))); createPolicy( client(), @@ -390,7 +389,7 @@ public void testMigrationDryRun() throws Exception { new Phase("warm", TimeValue.ZERO, warmActions), new Phase("cold", TimeValue.timeValueDays(100), coldActions), null, - new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE)) + new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE)) ); createIndexWithSettings( diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/TimeSeriesRestDriver.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/TimeSeriesRestDriver.java index 3949139db033b..a1c7ebc2d8b2c 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/TimeSeriesRestDriver.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/TimeSeriesRestDriver.java @@ -41,7 +41,6 @@ import java.io.IOException; import java.io.InputStream; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -50,7 +49,6 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static java.util.Collections.singletonMap; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween; import static org.elasticsearch.test.ESTestCase.randomBoolean; @@ -154,8 +152,8 @@ public static void createNewSingletonPolicy( LifecycleAction action, TimeValue after ) throws IOException { - Phase phase = new Phase(phaseName, after, singletonMap(action.getWriteableName(), action)); - LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, singletonMap(phase.getName(), phase)); + Phase phase = new Phase(phaseName, after, Map.of(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, Map.of(phase.getName(), phase)); XContentBuilder builder = jsonBuilder(); lifecyclePolicy.toXContent(builder, null); final StringEntity entity = new StringEntity("{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); @@ -202,7 +200,7 @@ public static void createFullPolicy(RestClient client, String policyName, TimeVa new AllocateAction( 1, null, - singletonMap("_name", "javaRestTest-0,javaRestTest-1," + "javaRestTest-2," + "javaRestTest-3"), + Map.of("_name", "javaRestTest-0,javaRestTest-1," + "javaRestTest-2," + "javaRestTest-3"), null, null ) @@ -215,7 +213,7 @@ public static void createFullPolicy(RestClient client, String policyName, TimeVa new AllocateAction( 0, 
null, - singletonMap("_name", "javaRestTest-0,javaRestTest-1," + "javaRestTest-2," + "javaRestTest-3"), + Map.of("_name", "javaRestTest-0,javaRestTest-1," + "javaRestTest-2," + "javaRestTest-3"), null, null ) @@ -224,7 +222,7 @@ public static void createFullPolicy(RestClient client, String policyName, TimeVa phases.put("hot", new Phase("hot", hotTime, hotActions)); phases.put("warm", new Phase("warm", TimeValue.ZERO, warmActions)); phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions)); - phases.put("delete", new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE))); + phases.put("delete", new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE))); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policyName, phases); // PUT policy XContentBuilder builder = jsonBuilder(); @@ -300,7 +298,7 @@ public static Map getOnlyIndexSettings(RestClient client, String Map responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); Map indexSettings = (Map) responseMap.get(index); if (indexSettings == null) { - return Collections.emptyMap(); + return Map.of(); } return (Map) indexSettings.get("settings"); } diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ChangePolicyForIndexIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ChangePolicyForIndexIT.java index 7f75b010346ad..370e00785e843 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ChangePolicyForIndexIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ChangePolicyForIndexIT.java @@ -32,7 +32,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createIndexWithSettings; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createNewSingletonPolicy; @@ -67,7 +66,7 @@ public void testChangePolicyForIndex() throws Exception { new Phase( "hot", TimeValue.ZERO, - singletonMap(RolloverAction.NAME, new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)) + Map.of(RolloverAction.NAME, new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)) ) ); phases1.put( @@ -75,7 +74,7 @@ public void testChangePolicyForIndex() throws Exception { new Phase( "warm", TimeValue.ZERO, - singletonMap(AllocateAction.NAME, new AllocateAction(1, null, singletonMap("_name", "foobarbaz"), null, null)) + Map.of(AllocateAction.NAME, new AllocateAction(1, null, Map.of("_name", "foobarbaz"), null, null)) ) ); LifecyclePolicy lifecyclePolicy1 = new LifecyclePolicy("policy_1", phases1); @@ -85,7 +84,7 @@ public void testChangePolicyForIndex() throws Exception { new Phase( "hot", TimeValue.ZERO, - singletonMap(RolloverAction.NAME, new RolloverAction(null, null, null, 1000L, null, null, null, null, null, null)) + Map.of(RolloverAction.NAME, new RolloverAction(null, null, null, 1000L, null, null, null, null, null, null)) ) ); phases2.put( @@ -93,15 +92,9 @@ public void testChangePolicyForIndex() throws Exception { new Phase( "warm", TimeValue.ZERO, - singletonMap( + Map.of( AllocateAction.NAME, - new AllocateAction( - 1, - null, - singletonMap("_name", "javaRestTest-0,javaRestTest-1,javaRestTest-2,javaRestTest-3"), - null, - null - ) + new AllocateAction(1, 
null, Map.of("_name", "javaRestTest-0,javaRestTest-1,javaRestTest-2,javaRestTest-3"), null, null) ) ) ); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java index 2b722a6555a08..4c53d711ffdef 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java @@ -58,7 +58,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createFullPolicy; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createIndexWithSettings; @@ -219,7 +218,7 @@ public void testAllocateOnlyAllocation() throws Exception { Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) ); String allocateNodeName = "javaRestTest-0,javaRestTest-1,javaRestTest-2,javaRestTest-3"; - AllocateAction allocateAction = new AllocateAction(null, null, singletonMap("_name", allocateNodeName), null, null); + AllocateAction allocateAction = new AllocateAction(null, null, Map.of("_name", allocateNodeName), null, null); String endPhase = randomFrom("warm", "cold"); createNewSingletonPolicy(client(), policy, endPhase, allocateAction); updatePolicy(client(), index, policy); @@ -978,7 +977,7 @@ public void testHaltAtEndOfPhase() throws Exception { hotActions.put(SetPriorityAction.NAME, new SetPriorityAction(100)); Map phases = new HashMap<>(); phases.put("hot", new Phase("hot", TimeValue.ZERO, hotActions)); - phases.put("delete", new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE))); + phases.put("delete", new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE))); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); // PUT policy XContentBuilder builder = jsonBuilder(); @@ -1004,7 +1003,7 @@ public void testDeleteActionDoesntDeleteSearchableSnapshot() throws Exception { phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions)); phases.put( "delete", - new Phase("delete", TimeValue.timeValueMillis(10000), singletonMap(DeleteAction.NAME, DeleteAction.NO_SNAPSHOT_DELETE)) + new Phase("delete", TimeValue.timeValueMillis(10000), Map.of(DeleteAction.NAME, DeleteAction.NO_SNAPSHOT_DELETE)) ); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); // PUT policy diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java index fefeaa95319ed..61fea054b7293 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java @@ -47,7 +47,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static 
org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createComposableTemplate; @@ -185,7 +184,7 @@ public void testDeleteActionDeletesSearchableSnapshot() throws Exception { Map coldActions = Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo)); Map phases = new HashMap<>(); phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions)); - phases.put("delete", new Phase("delete", TimeValue.timeValueMillis(10000), singletonMap(DeleteAction.NAME, WITH_SNAPSHOT_DELETE))); + phases.put("delete", new Phase("delete", TimeValue.timeValueMillis(10000), Map.of(DeleteAction.NAME, WITH_SNAPSHOT_DELETE))); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); // PUT policy XContentBuilder builder = jsonBuilder(); @@ -455,7 +454,7 @@ public void testIdenticalSearchableSnapshotActionIsNoop() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null ); @@ -516,12 +515,12 @@ public void testConvertingSearchableSnapshotFromFullToPartial() throws Exception new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null ); @@ -586,7 +585,7 @@ public void testResumingSearchableSnapshotFromFullToPartial() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null, null @@ -600,12 +599,12 @@ public void testResumingSearchableSnapshotFromFullToPartial() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null ); @@ -664,14 +663,14 @@ public void testResumingSearchableSnapshotFromFullToPartial() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), - new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, 
WITH_SNAPSHOT_DELETE)) + new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, WITH_SNAPSHOT_DELETE)) ); assertBusy(() -> { logger.info("--> waiting for [{}] to be deleted...", partiallyMountedIndexName); @@ -695,7 +694,7 @@ public void testResumingSearchableSnapshotFromPartialToFull() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null, null @@ -710,12 +709,12 @@ public void testResumingSearchableSnapshotFromPartialToFull() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null ); @@ -775,10 +774,10 @@ public void testResumingSearchableSnapshotFromPartialToFull() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), null, - new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, WITH_SNAPSHOT_DELETE)) + new Phase("delete", TimeValue.ZERO, Map.of(DeleteAction.NAME, WITH_SNAPSHOT_DELETE)) ); assertBusy(() -> { logger.info("--> waiting for [{}] to be deleted...", restoredPartiallyMountedIndexName); @@ -803,12 +802,12 @@ public void testSecondSearchableSnapshotUsingDifferentRepoThrows() throws Except new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(secondRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(secondRepo, randomBoolean())) ), null ) @@ -934,12 +933,12 @@ public void testSearchableSnapshotTotalShardsPerNode() throws Exception { new Phase( "cold", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) ), new Phase( "frozen", TimeValue.ZERO, - singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean(), totalShardsPerNode)) + Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean(), totalShardsPerNode)) ), null ); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java index d2f2dbbd0c9fb..2fecf3c617ccd 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java +++ 
b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java @@ -39,7 +39,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.TimeSeriesRestDriver.createIndexWithSettings; @@ -286,7 +285,7 @@ public void testSetSingleNodeAllocationRetriesUntilItSucceeds() throws Exception TimeValue.ZERO, Map.of(migrateAction.getWriteableName(), migrateAction, shrinkAction.getWriteableName(), shrinkAction) ); - LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, singletonMap(phase.getName(), phase)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of(phase.getName(), phase)); XContentBuilder builder = jsonBuilder(); lifecyclePolicy.toXContent(builder, null); final StringEntity entity = new StringEntity("{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); diff --git a/x-pack/plugin/ilm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/ilm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java index 9460500177616..12dede7067b03 100644 --- a/x-pack/plugin/ilm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java +++ b/x-pack/plugin/ilm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -45,7 +45,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static java.util.Collections.singletonMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -225,8 +224,8 @@ public void testWhenUserLimitedByOnlyAliasOfIndexCanWriteToIndexWhichWasRolledov } private void createNewSingletonPolicy(RestClient client, String policy, String phaseName, LifecycleAction action) throws IOException { - Phase phase = new Phase(phaseName, TimeValue.ZERO, singletonMap(action.getWriteableName(), action)); - LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, singletonMap(phase.getName(), phase)); + Phase phase = new Phase(phaseName, TimeValue.ZERO, Map.of(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of(phase.getName(), phase)); XContentBuilder builder = jsonBuilder(); lifecyclePolicy.toXContent(builder, null); final StringEntity entity = new StringEntity("{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java index 55daa8104c12a..f25028824b56e 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.junit.Before; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Locale; @@ -65,7 +64,7 @@ public void 
refreshDataStreamAndPolicy() {
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, Ccr.class);
+        return List.of(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, Ccr.class);
     }
 
     @Override
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java
index 7a0e00e5c4147..6d409bf474cfc 100644
--- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java
+++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java
@@ -30,9 +30,8 @@
 import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest;
 import org.junit.Before;
 
-import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
+import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
@@ -57,7 +56,7 @@ public void refreshDataStreamAndPolicy() {
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class);
+        return List.of(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class);
     }
 
     @Override
@@ -100,9 +99,9 @@ public void testIndexDataTierMigration() throws Exception {
         logger.info("starting a cold data node");
         internalCluster().startNode(coldNode(Settings.EMPTY));
 
-        Phase hotPhase = new Phase("hot", TimeValue.ZERO, Collections.emptyMap());
-        Phase warmPhase = new Phase("warm", TimeValue.ZERO, Collections.emptyMap());
-        Phase coldPhase = new Phase("cold", TimeValue.ZERO, Collections.emptyMap());
+        Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of());
+        Phase warmPhase = new Phase("warm", TimeValue.ZERO, Map.of());
+        Phase coldPhase = new Phase("cold", TimeValue.ZERO, Map.of());
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase, "warm", warmPhase, "cold", coldPhase));
         PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy);
         assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
@@ -161,9 +160,9 @@ public void testUserOptsOutOfTierMigration() throws Exception {
         logger.info("starting a cold data node");
         internalCluster().startNode(coldNode(Settings.EMPTY));
 
-        Phase hotPhase = new Phase("hot", TimeValue.ZERO, Collections.emptyMap());
-        Phase warmPhase = new Phase("warm", TimeValue.ZERO, Collections.emptyMap());
-        Phase coldPhase = new Phase("cold", TimeValue.ZERO, Collections.emptyMap());
+        Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of());
+        Phase warmPhase = new Phase("warm", TimeValue.ZERO, Map.of());
+        Phase coldPhase = new Phase("cold", TimeValue.ZERO, Map.of());
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase, "warm", warmPhase, "cold", coldPhase));
         PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy);
         assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java
index b443c769407c5..2c4c1c9e20bb6 100644
--- 
a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java @@ -33,10 +33,9 @@ import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -49,7 +48,7 @@ public class ILMMultiNodeIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, DataStreamsPlugin.class, IndexLifecycle.class, Ccr.class); + return List.of(LocalStateCompositeXPackPlugin.class, DataStreamsPlugin.class, IndexLifecycle.class, Ccr.class); } @Override @@ -69,9 +68,9 @@ public void testShrinkOnTiers() throws Exception { ensureGreen(); RolloverAction rolloverAction = new RolloverAction(null, null, null, 1L, null, null, null, null, null, null); - Phase hotPhase = new Phase("hot", TimeValue.ZERO, Collections.singletonMap(rolloverAction.getWriteableName(), rolloverAction)); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(rolloverAction.getWriteableName(), rolloverAction)); ShrinkAction shrinkAction = new ShrinkAction(1, null, false); - Phase warmPhase = new Phase("warm", TimeValue.ZERO, Collections.singletonMap(shrinkAction.getWriteableName(), shrinkAction)); + Phase warmPhase = new Phase("warm", TimeValue.ZERO, Map.of(shrinkAction.getWriteableName(), shrinkAction)); Map phases = new HashMap<>(); phases.put(hotPhase.getName(), hotPhase); phases.put(warmPhase.getName(), warmPhase); @@ -89,7 +88,7 @@ public void testShrinkOnTiers() throws Exception { ); ComposableIndexTemplate template = ComposableIndexTemplate.builder() - .indexPatterns(Collections.singletonList(index)) + .indexPatterns(List.of(index)) .template(t) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(); @@ -121,12 +120,12 @@ public void testShrinkOnTiers() throws Exception { } public void startHotOnlyNode() { - Settings nodeSettings = Settings.builder().putList("node.roles", Arrays.asList("master", "data_hot", "ingest")).build(); + Settings nodeSettings = Settings.builder().putList("node.roles", List.of("master", "data_hot", "ingest")).build(); internalCluster().startNode(nodeSettings); } public void startWarmOnlyNode() { - Settings nodeSettings = Settings.builder().putList("node.roles", Arrays.asList("master", "data_warm", "ingest")).build(); + Settings nodeSettings = Settings.builder().putList("node.roles", List.of("master", "data_warm", "ingest")).build(); internalCluster().startNode(nodeSettings); } } diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java index e02dd5fe45676..b91a309a23ae5 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java @@ -34,10 +34,9 @@ import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import 
java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -50,7 +49,7 @@ public class ILMMultiNodeWithCCRDisabledIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, DataStreamsPlugin.class, IndexLifecycle.class, Ccr.class); + return List.of(LocalStateCompositeXPackPlugin.class, DataStreamsPlugin.class, IndexLifecycle.class, Ccr.class); } @Override @@ -75,7 +74,7 @@ public void testShrinkOnTiers() throws Exception { actions.put(shrinkAction.getWriteableName(), shrinkAction); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - LifecyclePolicy lifecyclePolicy = new LifecyclePolicy("shrink-policy", Collections.singletonMap(hotPhase.getName(), hotPhase)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy("shrink-policy", Map.of(hotPhase.getName(), hotPhase)); client().execute(ILMActions.PUT, new PutLifecycleRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, lifecyclePolicy)).get(); Template t = new Template( @@ -89,7 +88,7 @@ public void testShrinkOnTiers() throws Exception { ); ComposableIndexTemplate template = ComposableIndexTemplate.builder() - .indexPatterns(Collections.singletonList(index)) + .indexPatterns(List.of(index)) .template(t) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(); @@ -121,12 +120,12 @@ public void testShrinkOnTiers() throws Exception { } public void startHotOnlyNode() { - Settings nodeSettings = Settings.builder().putList("node.roles", Arrays.asList("master", "data_hot", "ingest")).build(); + Settings nodeSettings = Settings.builder().putList("node.roles", List.of("master", "data_hot", "ingest")).build(); internalCluster().startNode(nodeSettings); } public void startWarmOnlyNode() { - Settings nodeSettings = Settings.builder().putList("node.roles", Arrays.asList("master", "data_warm", "ingest")).build(); + Settings nodeSettings = Settings.builder().putList("node.roles", List.of("master", "data_warm", "ingest")).build(); internalCluster().startNode(nodeSettings); } } diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java index d06a9f9cc19b1..644f88dc533b9 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java @@ -56,9 +56,7 @@ import java.io.IOException; import java.time.Instant; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -112,7 +110,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { @Override protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class); + return List.of(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class); } @Before @@ -128,9 +126,9 @@ public void init() { Step.StepKey compKey = new Step.StepKey("mock", "complete", "complete"); steps.add(new ObservableClusterStateWaitStep(key, compKey)); steps.add(new PhaseCompleteStep(compKey, null)); - Map actions = Collections.singletonMap(ObservableAction.NAME, OBSERVABLE_ACTION); + Map 
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
index d06a9f9cc19b1..644f88dc533b9 100644
--- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
+++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
@@ -56,9 +56,7 @@
 import java.io.IOException;
 import java.time.Instant;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -112,7 +110,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class);
+        return List.of(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class);
     }
     @Before
@@ -128,9 +126,9 @@ public void init() {
         Step.StepKey compKey = new Step.StepKey("mock", "complete", "complete");
         steps.add(new ObservableClusterStateWaitStep(key, compKey));
         steps.add(new PhaseCompleteStep(compKey, null));
-        Map<String, LifecycleAction> actions = Collections.singletonMap(ObservableAction.NAME, OBSERVABLE_ACTION);
+        Map<String, LifecycleAction> actions = Map.of(ObservableAction.NAME, OBSERVABLE_ACTION);
         mockPhase = new Phase("mock", TimeValue.timeValueSeconds(0), actions);
-        Map<String, Phase> phases = Collections.singletonMap("mock", mockPhase);
+        Map<String, Phase> phases = Map.of("mock", mockPhase);
         lifecyclePolicy = newLockableLifecyclePolicy("test", phases);
     }
@@ -311,7 +309,7 @@ public void testExplainExecution() throws Exception {
         updateIndexSettings(Settings.builder().put("index.lifecycle.test.complete", true), "test");
         {
-            Phase phase = new Phase("mock", TimeValue.ZERO, Collections.singletonMap("TEST_ACTION", OBSERVABLE_ACTION));
+            Phase phase = new Phase("mock", TimeValue.ZERO, Map.of("TEST_ACTION", OBSERVABLE_ACTION));
             PhaseExecutionInfo expectedExecutionInfo = new PhaseExecutionInfo(lifecyclePolicy.getName(), phase, 1L, actualModifiedDate);
             assertBusy(() -> {
                 IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse("test");
@@ -526,12 +524,12 @@ public List<Setting<?>> getSettings() {
                 Setting.Property.Dynamic,
                 Setting.Property.IndexScope
             );
-            return Collections.singletonList(COMPLETE_SETTING);
+            return List.of(COMPLETE_SETTING);
         }
         @Override
         public List<NamedXContentRegistry.Entry> getNamedXContent() {
-            return Arrays.asList(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ObservableAction.NAME), (p) -> {
+            return List.of(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ObservableAction.NAME), (p) -> {
                 MockAction.parse(p);
                 return OBSERVABLE_ACTION;
             }));
@@ -539,7 +537,7 @@ public List<NamedXContentRegistry.Entry> getNamedXContent() {
         @Override
         public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
-            return Arrays.asList(
+            return List.of(
                 new NamedWriteableRegistry.Entry(LifecycleType.class, LockableLifecycleType.TYPE, (in) -> LockableLifecycleType.INSTANCE),
                 new NamedWriteableRegistry.Entry(LifecycleAction.class, ObservableAction.NAME, ObservableAction::readObservableAction),
                 new NamedWriteableRegistry.Entry(
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java
index e06c7bc2708ca..a36b74d9932d9 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java
@@ -43,7 +43,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
 import java.util.SortedMap;
 import java.util.TreeMap;
@@ -252,7 +251,7 @@ static List<String> migrateIlmPolicies(
     ) {
         IndexLifecycleMetadata currentLifecycleMetadata = currentState.metadata().custom(IndexLifecycleMetadata.TYPE);
         if (currentLifecycleMetadata == null) {
-            return Collections.emptyList();
+            return List.of();
         }
         List<String> migratedPolicies = new ArrayList<>();
@@ -811,13 +810,12 @@ static String convertAttributeValueToTierPreference(String nodeAttributeValue) {
      * Represents the elasticsearch abstractions that were, in some way, migrated such that the system is managing indices lifecycles and
      * allocations using data tiers.
      */
-    public static final class MigratedEntities {
-        @Nullable
-        public final String removedIndexTemplateName;
-        public final List<String> migratedIndices;
-        public final List<String> migratedPolicies;
-        public final MigratedTemplates migratedTemplates;
-
+    public record MigratedEntities(
+        @Nullable String removedIndexTemplateName,
+        List<String> migratedIndices,
+        List<String> migratedPolicies,
+        MigratedTemplates migratedTemplates
+    ) {
         public MigratedEntities(
             @Nullable String removedIndexTemplateName,
             List<String> migratedIndices,
@@ -829,37 +827,17 @@ public MigratedEntities(
             this.migratedPolicies = Collections.unmodifiableList(migratedPolicies);
             this.migratedTemplates = migratedTemplates;
         }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) {
-                return true;
-            }
-            if (o == null || getClass() != o.getClass()) {
-                return false;
-            }
-            MigratedEntities that = (MigratedEntities) o;
-            return Objects.equals(removedIndexTemplateName, that.removedIndexTemplateName)
-                && Objects.equals(migratedIndices, that.migratedIndices)
-                && Objects.equals(migratedPolicies, that.migratedPolicies)
-                && Objects.equals(migratedTemplates, that.migratedTemplates);
-        }
-
-        @Override
-        public int hashCode() {
-            return Objects.hash(removedIndexTemplateName, migratedIndices, migratedPolicies, migratedTemplates);
-        }
     }
     /**
      * Represents the legacy, composable, and component templates that were migrated away from shard allocation settings based on custom
      * node attributes.
      */
-    public static final class MigratedTemplates {
-        public final List<String> migratedLegacyTemplates;
-        public final List<String> migratedComposableTemplates;
-        public final List<String> migratedComponentTemplates;
-
+    public record MigratedTemplates(
+        List<String> migratedLegacyTemplates,
+        List<String> migratedComposableTemplates,
+        List<String> migratedComponentTemplates
+    ) {
         public MigratedTemplates(
             List<String> migratedLegacyTemplates,
             List<String> migratedComposableTemplates,
@@ -869,24 +847,5 @@ public MigratedTemplates(
             this.migratedComposableTemplates = Collections.unmodifiableList(migratedComposableTemplates);
             this.migratedComponentTemplates = Collections.unmodifiableList(migratedComponentTemplates);
         }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) {
-                return true;
-            }
-            if (o == null || getClass() != o.getClass()) {
-                return false;
-            }
-            MigratedTemplates that = (MigratedTemplates) o;
-            return Objects.equals(migratedLegacyTemplates, that.migratedLegacyTemplates)
-                && Objects.equals(migratedComposableTemplates, that.migratedComposableTemplates)
-                && Objects.equals(migratedComponentTemplates, that.migratedComponentTemplates);
-        }
-
-        @Override
-        public int hashCode() {
-            return Objects.hash(migratedLegacyTemplates, migratedComposableTemplates, migratedComponentTemplates);
-        }
     }
 }
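The MigratedEntities/MigratedTemplates rewrite above leans on the fact that a record auto-generates the constructor, accessors, equals, hashCode, and toString that the old final-field classes spelled out by hand; the explicit canonical constructor is kept only to wrap the incoming lists unmodifiable. A self-contained sketch of the same pattern (a hypothetical Migrated record, not the class above):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class RecordDemo {
        // Same shape as the conversion above: record components replace public final
        // fields, and the explicit canonical constructor defensively wraps mutable inputs.
        record Migrated(String removedTemplate, List<String> indices) {
            Migrated(String removedTemplate, List<String> indices) {
                this.removedTemplate = removedTemplate;
                this.indices = Collections.unmodifiableList(indices);
            }
        }

        public static void main(String[] args) {
            List<String> in = new ArrayList<>(List.of("idx-1", "idx-2"));
            Migrated a = new Migrated("catch-all", in);
            Migrated b = new Migrated("catch-all", new ArrayList<>(in));
            System.out.println(a.equals(b));  // true: equals/hashCode are generated from the components
            System.out.println(a.indices());  // generated accessor is indices(), with no get prefix
        }
    }

List.copyOf would be the more modern way to take the defensive copy, but the diff deliberately keeps the existing Collections.unmodifiableList behavior unchanged.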
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java
index 77b143f93576b..8c08194b11e05 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java
@@ -159,7 +159,7 @@ public ClusterState doExecute(final ClusterState currentState) throws IOExceptio
                     // to be met (eg. {@link LifecycleSettings#LIFECYCLE_STEP_WAIT_TIME_THRESHOLD_SETTING}, so it's important we
                     // re-evaluate what the next step is after we evaluate the condition
                     nextStepKey = currentStep.getNextStepKey();
-                    if (result.isComplete()) {
+                    if (result.complete()) {
                         logger.trace(
                             "[{}] cluster state step condition met successfully ({}) [{}], moving to next step {}",
                             index.getName(),
@@ -180,7 +180,7 @@ public ClusterState doExecute(final ClusterState currentState) throws IOExceptio
                             );
                         }
                     } else {
-                        final ToXContentObject stepInfo = result.getInformationContext();
+                        final ToXContentObject stepInfo = result.informationContext();
                         if (logger.isTraceEnabled()) {
                             logger.trace(
                                 "[{}] condition not met ({}) [{}], returning existing state (info: {})",
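The isComplete()/getInformationContext() to complete()/informationContext() renames above follow the same record migration: generated record accessors carry the bare component name, with no get/is prefix. A stand-in sketch (assuming the real ClusterStateWaitStep.Result was converted to a record elsewhere in this change; the Result type here is hypothetical):

    public class RecordAccessorDemo {
        // Stand-in for a class whose isComplete()/getInformationContext() pair became
        // record components: the accessors are named after the components themselves.
        record Result(boolean complete, String informationContext) {}

        public static void main(String[] args) {
            Result result = new Result(false, "{\"reason\":\"waiting\"}");
            if (result.complete()) {                              // was: result.isComplete()
                System.out.println("condition met");
            } else {
                System.out.println(result.informationContext());  // was: result.getInformationContext()
            }
        }
    }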
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java
index 42d1955f0d453..c5d367804db42 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java
@@ -41,7 +41,6 @@
 import org.elasticsearch.xpack.core.ilm.WaitForRolloverReadyStep;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -219,8 +218,8 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources
                 GREEN,
                 "No Index Lifecycle Management policies configured",
                 createDetails(verbose, ilmMetadata, currentMode),
-                Collections.emptyList(),
-                Collections.emptyList()
+                List.of(),
+                List.of()
             );
         } else if (currentMode != OperationMode.RUNNING) {
             return createIndicator(
@@ -238,8 +237,8 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources
                 GREEN,
                 "Index Lifecycle Management is running",
                 createDetails(verbose, ilmMetadata, currentMode),
-                Collections.emptyList(),
-                Collections.emptyList()
+                List.of(),
+                List.of()
             );
         } else {
             return createIndicator(
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java
index f41524480e2df..b6e800b61337f 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java
@@ -91,7 +91,6 @@
 import java.io.IOException;
 import java.time.Clock;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.function.LongSupplier;
@@ -121,7 +120,7 @@ protected Clock getClock() {
     @Override
     public List<Setting<?>> getSettings() {
-        return Arrays.asList(
+        return List.of(
             LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING,
             LifecycleSettings.LIFECYCLE_NAME_SETTING,
             LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING,
@@ -204,7 +203,7 @@ public List<NamedXContentRegistry.Entry> getNamedXContent() {
     }
     private static List<NamedXContentRegistry.Entry> xContentEntries() {
-        return Arrays.asList(
+        return List.of(
             // Custom Metadata
             new NamedXContentRegistry.Entry(
                 Metadata.Custom.class,
@@ -260,52 +259,38 @@ public List<RestHandler> getRestHandlers(
         Supplier<DiscoveryNodes> nodesInCluster,
         Predicate<NodeFeature> clusterSupportsFeature
     ) {
-        List<RestHandler> handlers = new ArrayList<>();
-
-        handlers.addAll(
-            Arrays.asList(
-                // add ILM rest handlers
-                new RestPutLifecycleAction(),
-                new RestGetLifecycleAction(),
-                new RestDeleteLifecycleAction(),
-                new RestExplainLifecycleAction(),
-                new RestRemoveIndexLifecyclePolicyAction(),
-                new RestMoveToStepAction(),
-                new RestRetryAction(),
-                new RestStopAction(),
-                new RestStartILMAction(),
-                new RestGetStatusAction(),
-                new RestMigrateToDataTiersAction()
-            )
+        return List.of(
+            new RestPutLifecycleAction(),
+            new RestGetLifecycleAction(),
+            new RestDeleteLifecycleAction(),
+            new RestExplainLifecycleAction(),
+            new RestRemoveIndexLifecyclePolicyAction(),
+            new RestMoveToStepAction(),
+            new RestRetryAction(),
+            new RestStopAction(),
+            new RestStartILMAction(),
+            new RestGetStatusAction(),
+            new RestMigrateToDataTiersAction()
         );
-        return handlers;
     }
     @Override
     public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
-        var ilmUsageAction = new ActionHandler<>(XPackUsageFeatureAction.INDEX_LIFECYCLE, IndexLifecycleUsageTransportAction.class);
-        var ilmInfoAction = new ActionHandler<>(XPackInfoFeatureAction.INDEX_LIFECYCLE, IndexLifecycleInfoTransportAction.class);
-        var migrateToDataTiersAction = new ActionHandler<>(MigrateToDataTiersAction.INSTANCE, TransportMigrateToDataTiersAction.class);
-        List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> actions = new ArrayList<>();
-        actions.add(ilmUsageAction);
-        actions.add(ilmInfoAction);
-        actions.add(migrateToDataTiersAction);
-        actions.addAll(
-            Arrays.asList(
-                // add ILM actions
-                new ActionHandler<>(ILMActions.PUT, TransportPutLifecycleAction.class),
-                new ActionHandler<>(GetLifecycleAction.INSTANCE, TransportGetLifecycleAction.class),
-                new ActionHandler<>(DeleteLifecycleAction.INSTANCE, TransportDeleteLifecycleAction.class),
-                new ActionHandler<>(ExplainLifecycleAction.INSTANCE, TransportExplainLifecycleAction.class),
-                new ActionHandler<>(RemoveIndexLifecyclePolicyAction.INSTANCE, TransportRemoveIndexLifecyclePolicyAction.class),
-                new ActionHandler<>(ILMActions.MOVE_TO_STEP, TransportMoveToStepAction.class),
-                new ActionHandler<>(ILMActions.RETRY, TransportRetryAction.class),
-                new ActionHandler<>(ILMActions.START, TransportStartILMAction.class),
-                new ActionHandler<>(ILMActions.STOP, TransportStopILMAction.class),
-                new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class)
-            )
+        return List.of(
+            new ActionHandler<>(XPackUsageFeatureAction.INDEX_LIFECYCLE, IndexLifecycleUsageTransportAction.class),
+            new ActionHandler<>(XPackInfoFeatureAction.INDEX_LIFECYCLE, IndexLifecycleInfoTransportAction.class),
+            new ActionHandler<>(MigrateToDataTiersAction.INSTANCE, TransportMigrateToDataTiersAction.class),
+            new ActionHandler<>(ILMActions.PUT, TransportPutLifecycleAction.class),
+            new ActionHandler<>(GetLifecycleAction.INSTANCE, TransportGetLifecycleAction.class),
+            new ActionHandler<>(DeleteLifecycleAction.INSTANCE, TransportDeleteLifecycleAction.class),
+            new ActionHandler<>(ExplainLifecycleAction.INSTANCE, TransportExplainLifecycleAction.class),
+            new ActionHandler<>(RemoveIndexLifecyclePolicyAction.INSTANCE, TransportRemoveIndexLifecyclePolicyAction.class),
+            new ActionHandler<>(ILMActions.MOVE_TO_STEP, TransportMoveToStepAction.class),
+            new ActionHandler<>(ILMActions.RETRY, TransportRetryAction.class),
+            new ActionHandler<>(ILMActions.START, TransportStartILMAction.class),
+            new ActionHandler<>(ILMActions.STOP, TransportStopILMAction.class),
+            new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class)
        );
-        return actions;
     }
     List<ReservedClusterStateHandler<?>> reservedClusterStateHandlers() {
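The getRestHandlers/getActions rewrite above is the same cleanup in a larger form: a mutable ArrayList populated via add/addAll(Arrays.asList(...)) collapses into a single List.of(...) when the contents are fixed at construction time. A minimal before/after sketch with placeholder handler types (all names here are illustrative, not Elasticsearch API):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class HandlerRegistrationDemo {
        interface Handler {}
        record NamedHandler(String name) implements Handler {}

        // Before: a mutable accumulator, even though the set of handlers never changes.
        static List<Handler> handlersBefore() {
            List<Handler> handlers = new ArrayList<>();
            handlers.add(new NamedHandler("usage"));
            handlers.addAll(Arrays.asList(new NamedHandler("put"), new NamedHandler("get")));
            return handlers;
        }

        // After: one immutable expression; accidental later mutation becomes impossible.
        static List<Handler> handlersAfter() {
            return List.of(new NamedHandler("usage"), new NamedHandler("put"), new NamedHandler("get"));
        }

        public static void main(String[] args) {
            System.out.println(handlersBefore().size() == handlersAfter().size()); // true
        }
    }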
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java
index efa8e67fee3c8..85739dcd0dcfb 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java
@@ -18,6 +18,7 @@
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.cluster.service.MasterServiceTaskQueue;
 import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.core.TimeValue;
@@ -39,7 +40,6 @@
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.Locale;
 import java.util.Objects;
 import java.util.Set;
 import java.util.function.LongSupplier;
@@ -290,13 +290,7 @@ void onErrorMaybeRetryFailedStep(String policy, StepKey currentStep, IndexMetada
             // IndexLifecycleRunner#runPeriodicStep} run the policy will still be in the ERROR step, as we haven't been able
             // to move it back into the failed step, so we'll try again
             submitUnlessAlreadyQueued(
-                String.format(
-                    Locale.ROOT,
-                    "ilm-retry-failed-step {policy [%s], index [%s], failedStep [%s]}",
-                    policy,
-                    index,
-                    failedStep.getKey()
-                ),
+                Strings.format("ilm-retry-failed-step {policy [%s], index [%s], failedStep [%s]}", policy, index, failedStep.getKey()),
                 new MoveToRetryFailedStepUpdateTask(indexMetadata.getIndex(), policy, currentStep, failedStep)
             );
         } else {
@@ -444,7 +438,7 @@ void runPolicyAfterStateChange(String policy, IndexMetadata indexMetadata) {
         } else if (currentStep instanceof ClusterStateActionStep || currentStep instanceof ClusterStateWaitStep) {
             logger.debug("[{}] running policy with current-step [{}]", indexMetadata.getIndex().getName(), currentStep.getKey());
             submitUnlessAlreadyQueued(
-                String.format(Locale.ROOT, "ilm-execute-cluster-state-steps [%s]", currentStep),
+                Strings.format("ilm-execute-cluster-state-steps [%s]", currentStep),
                 new ExecuteStepsUpdateTask(policy, indexMetadata.getIndex(), currentStep, stepRegistry, this, nowSupplier)
             );
         } else {
@@ -459,8 +453,7 @@ void runPolicyAfterStateChange(String policy, IndexMetadata indexMetadata) {
     private void moveToStep(Index index, String policy, Step.StepKey currentStepKey, Step.StepKey newStepKey) {
         logger.debug("[{}] moving to step [{}] {} -> {}", index.getName(), policy, currentStepKey, newStepKey);
         submitUnlessAlreadyQueued(
-            String.format(
-                Locale.ROOT,
+            Strings.format(
                 "ilm-move-to-step {policy [%s], index [%s], currentStep [%s], nextStep [%s]}",
                 policy,
                 index.getName(),
@@ -486,13 +479,7 @@ private void moveToErrorStep(Index index, String policy, Step.StepKey currentSte
             e
         );
         submitUnlessAlreadyQueued(
-            String.format(
-                Locale.ROOT,
-                "ilm-move-to-error-step {policy [%s], index [%s], currentStep [%s]}",
-                policy,
-                index.getName(),
-                currentStepKey
-            ),
+            Strings.format("ilm-move-to-error-step {policy [%s], index [%s], currentStep [%s]}", policy, index.getName(), currentStepKey),
             new MoveToErrorStepUpdateTask(index, policy, currentStepKey, e, nowSupplier, stepRegistry::getStep, clusterState -> {
                 IndexMetadata indexMetadata = clusterState.metadata().index(index);
                 registerFailedOperation(indexMetadata, e);
@@ -506,13 +493,7 @@ private void moveToErrorStep(Index index, String policy, Step.StepKey currentSte
      */
     private void setStepInfo(Index index, String policy, @Nullable Step.StepKey currentStepKey, ToXContentObject stepInfo) {
         submitUnlessAlreadyQueued(
-            String.format(
-                Locale.ROOT,
-                "ilm-set-step-info {policy [%s], index [%s], currentStep [%s]}",
-                policy,
-                index.getName(),
-                currentStepKey
-            ),
+            Strings.format("ilm-set-step-info {policy [%s], index [%s], currentStep [%s]}", policy, index.getName(), currentStepKey),
             new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo)
         );
     }
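On the String.format(Locale.ROOT, ...) to Strings.format(...) substitutions above: org.elasticsearch.common.Strings.format is Elasticsearch's root-locale formatting shorthand, so each call site drops the explicit Locale.ROOT argument and often fits on one line again. A plain-JDK sketch of the contract such a helper provides (the fmt name is hypothetical, shown only to illustrate; the real helper may add extra guards around malformed patterns):

    import java.util.Locale;

    public class RootLocaleFormatDemo {
        // Hypothetical stand-in for a root-locale format helper: the point of the
        // change above is that the Locale.ROOT argument lives in exactly one place.
        static String fmt(String pattern, Object... args) {
            return String.format(Locale.ROOT, pattern, args);
        }

        public static void main(String[] args) {
            // Locale-sensitive formatting is the bug being designed away: under some
            // default locales even digit rendering can change.
            System.out.println(fmt("ilm-move-to-step {policy [%s], index [%s]}", "shrink-policy", "index-000001"));
        }
    }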
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java
index 9c978ffc25cba..71d61caa5fe31 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java
@@ -54,7 +54,6 @@
 import java.io.Closeable;
 import java.time.Clock;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.Map;
 import java.util.Set;
 import java.util.function.LongSupplier;
@@ -354,7 +353,7 @@ private void cancelJob() {
     @Override
     public void triggered(SchedulerEngine.Event event) {
         if (event.jobName().equals(XPackField.INDEX_LIFECYCLE)) {
-            logger.trace("job triggered: " + event.jobName() + ", " + event.scheduledTime() + ", " + event.triggeredTime());
+            logger.trace("job triggered: {}, {}, {}", event.jobName(), event.scheduledTime(), event.triggeredTime());
             triggerPolicies(clusterService.state(), false);
         }
     }
@@ -500,7 +499,7 @@ static Set<String> indicesOnShuttingDownNodesInDangerousStep(ClusterState state,
             SingleNodeShutdownMetadata.Type.REPLACE
         );
         if (shutdownNodes.isEmpty()) {
-            return Collections.emptySet();
+            return Set.of();
         }
         Set<String> indicesPreventingShutdown = state.metadata()
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java
index 4567e291aebed..296623b54509f 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java
@@ -14,6 +14,7 @@
 import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -42,7 +43,6 @@
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
@@ -269,9 +269,8 @@ public Set<Step.StepKey> parseStepKeysFromPhase(String policy, String currentPha
             return parseStepsFromPhase(policy, currentPhase, phaseDefNonNull).stream().map(Step::getKey).collect(Collectors.toSet());
         } catch (IOException e) {
             logger.trace(
-                () -> String.format(
-                    Locale.ROOT,
-                    "unable to parse steps for policy [{}], phase [{}], and phase definition [{}]",
+                () -> Strings.format(
+                    "unable to parse steps for policy [%s], phase [%s], and phase definition [%s]",
                     policy,
                     currentPhase,
                     phaseDef
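The PolicyStepsRegistry hunk above fixes a real logging bug, not just style: the old message was a String.format pattern written with logger-style {} placeholders, so the arguments were never substituted and the braces were printed literally. %s is the printf-family placeholder; {} belongs to the logger's own parameterized API (which the IndexLifecycleService hunk above switches to, avoiding string concatenation when trace logging is disabled). A standalone demonstration:

    import java.util.Locale;

    public class PlaceholderMismatchDemo {
        public static void main(String[] args) {
            // Old behavior: String.format ignores {} and the extra argument is dropped.
            System.out.println(String.format(Locale.ROOT, "unable to parse steps for policy [{}]", "shrink-policy"));
            // prints: unable to parse steps for policy [{}]

            // Fixed behavior: %s placeholders are actually substituted.
            System.out.println(String.format(Locale.ROOT, "unable to parse steps for policy [%s]", "shrink-policy"));
            // prints: unable to parse steps for policy [shrink-policy]
        }
    }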
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java
index f4598727d6123..5fa0f881559fb 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java
@@ -32,7 +32,6 @@
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -71,7 +70,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A
         IndexLifecycleMetadata metadata = clusterService.state().metadata().custom(IndexLifecycleMetadata.TYPE);
         if (metadata == null) {
             if (request.getPolicyNames().length == 0) {
-                listener.onResponse(new Response(Collections.emptyList()));
+                listener.onResponse(new Response(List.of()));
             } else {
                 listener.onFailure(
                     new ResourceNotFoundException("Lifecycle policy not found: {}", Arrays.toString(request.getPolicyNames()))
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java
index 48cf84ed7a6a4..ef7554beed9e9 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java
@@ -100,12 +100,12 @@ protected void masterOperation(
             ).v2();
             listener.onResponse(
                 new MigrateToDataTiersResponse(
-                    entities.removedIndexTemplateName,
-                    entities.migratedPolicies,
-                    entities.migratedIndices,
-                    entities.migratedTemplates.migratedLegacyTemplates,
-                    entities.migratedTemplates.migratedComposableTemplates,
-                    entities.migratedTemplates.migratedComponentTemplates,
+                    entities.removedIndexTemplateName(),
+                    entities.migratedPolicies(),
+                    entities.migratedIndices(),
+                    entities.migratedTemplates().migratedLegacyTemplates(),
+                    entities.migratedTemplates().migratedComposableTemplates(),
+                    entities.migratedTemplates().migratedComponentTemplates(),
                     true
                 )
             );
@@ -145,7 +145,7 @@ public void onFailure(Exception e) {
             @Override
             public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
-                rerouteService.reroute("cluster migrated to data tiers routing", Priority.NORMAL, new ActionListener<Void>() {
+                rerouteService.reroute("cluster migrated to data tiers routing", Priority.NORMAL, new ActionListener<>() {
                     @Override
                     public void onResponse(Void ignored) {}
@@ -161,12 +161,12 @@ public void onFailure(Exception e) {
                 MigratedEntities entities = migratedEntities.get();
                 listener.onResponse(
                     new MigrateToDataTiersResponse(
-                        entities.removedIndexTemplateName,
-                        entities.migratedPolicies,
-                        entities.migratedIndices,
-                        entities.migratedTemplates.migratedLegacyTemplates,
-                        entities.migratedTemplates.migratedComposableTemplates,
-                        entities.migratedTemplates.migratedComponentTemplates,
+                        entities.removedIndexTemplateName(),
+                        entities.migratedPolicies(),
+                        entities.migratedIndices(),
+                        entities.migratedTemplates().migratedLegacyTemplates(),
+                        entities.migratedTemplates().migratedComposableTemplates(),
+                        entities.migratedTemplates().migratedComponentTemplates(),
                         false
                     )
                 );
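The ActionListener change just above is a small companion cleanup: since Java 9 the diamond operator also works on anonymous classes when the type argument is inferable, so the explicit type parameter can be dropped. Minimal sketch with a stand-in listener interface (illustrative names, not the Elasticsearch API):

    public class AnonymousDiamondDemo {
        interface Listener<T> {
            void onResponse(T value);
            void onFailure(Exception e);
        }

        static void reroute(Listener<Void> listener) {
            listener.onResponse(null);
        }

        public static void main(String[] args) {
            // Java 9+: the diamond works on anonymous classes too; Void is
            // inferred from the reroute(Listener<Void>) parameter type.
            reroute(new Listener<>() {
                @Override
                public void onResponse(Void ignored) {
                    System.out.println("rerouted");
                }

                @Override
                public void onFailure(Exception e) {
                    System.err.println(e.getMessage());
                }
            });
        }
    }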
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryItem.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryItem.java
index 977887a0487f3..efd54e05cb153 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryItem.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryItem.java
@@ -18,7 +18,7 @@
 import org.elasticsearch.xcontent.json.JsonXContent;
 import java.io.IOException;
-import java.util.Collections;
+import java.util.Map;
 import java.util.Objects;
 import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE;
@@ -110,7 +110,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     }
     private static String exceptionToString(Exception exception) {
-        Params stacktraceParams = new MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"));
+        Params stacktraceParams = new MapParams(Map.of(REST_EXCEPTION_SKIP_STACK_TRACE, "false"));
         String exceptionString;
         try (XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder()) {
             causeXContentBuilder.startObject();
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java
index b8af3e8e0daa2..549b321be8182 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java
@@ -58,7 +58,7 @@ public class ILMHistoryStore implements Closeable {
     public static final String ILM_HISTORY_DATA_STREAM = "ilm-history-" + INDEX_TEMPLATE_VERSION;
-    private static int ILM_HISTORY_BULK_SIZE = StrictMath.toIntExact(
+    private static final int ILM_HISTORY_BULK_SIZE = StrictMath.toIntExact(
         ByteSizeValue.parseBytesSizeValue(
             System.getProperty("es.indices.lifecycle.history.bulk.size", "50MB"),
             "es.indices.lifecycle.history.bulk.size"
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java
index 51df651ea4a4c..2ee133b6292bd 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java
@@ -48,7 +48,6 @@
 import java.io.ByteArrayInputStream;
 import java.nio.charset.StandardCharsets;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -118,10 +117,7 @@ public void testMigrateIlmPolicyForIndexWithoutILMMetadata() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(IndexMetadata.builder(indexName).settings(getBaseIndexSettings()))
                 .build()
@@ -176,7 +172,7 @@ public void testMigrateIlmPolicyForPhaseWithDeactivatedMigrateAction() {
         );
         LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(
             policy,
-            Collections.emptyMap(),
+            Map.of(),
             randomNonNegativeLong(),
             randomNonNegativeLong()
         );
@@ -186,10 +182,7 @@ public void testMigrateIlmPolicyForPhaseWithDeactivatedMigrateAction() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(IndexMetadata.builder(indexName).settings(getBaseIndexSettings()))
                 .build()
@@ -245,10 +238,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(indexMetadata)
                 .build()
@@ -302,10 +292,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() {
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
                     new IndexLifecycleMetadata(
-                        Collections.singletonMap(
-                            policyMetadataWithTotalShardsPerNode.getName(),
-                            policyMetadataWithTotalShardsPerNode
-                        ),
+                        Map.of(policyMetadataWithTotalShardsPerNode.getName(), policyMetadataWithTotalShardsPerNode),
                         OperationMode.STOPPED
                     )
                 )
@@ -352,10 +339,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(indexMetadata)
                 .build()
@@ -406,10 +390,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(indexMetadata)
                 .build()
@@ -456,10 +437,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() {
             Metadata.builder()
                 .putCustom(
                     IndexLifecycleMetadata.TYPE,
-                    new IndexLifecycleMetadata(
-                        Collections.singletonMap(policyMetadata.getName(), policyMetadata),
-                        OperationMode.STOPPED
-                    )
+                    new IndexLifecycleMetadata(Map.of(policyMetadata.getName(), policyMetadata), OperationMode.STOPPED)
                 )
                 .put(indexMetadata)
                 .build()
@@ -1008,7 +986,7 @@ public void testMigrateToDataTiersRouting() {
         );
         LifecyclePolicyMetadata policyWithDataAttribute = new LifecyclePolicyMetadata(
             policyToMigrate,
-            Collections.emptyMap(),
+            Map.of(),
             randomNonNegativeLong(),
             randomNonNegativeLong()
         );
@@ -1026,7 +1004,7 @@ public void testMigrateToDataTiersRouting() {
         );
         LifecyclePolicyMetadata policyWithOtherAttribute = new LifecyclePolicyMetadata(
             shouldntBeMigratedPolicy,
-            Collections.emptyMap(),
+            Map.of(),
             randomNonNegativeLong(),
             randomNonNegativeLong()
         );
@@ -1080,11 +1058,11 @@ public void testMigrateToDataTiersRouting() {
         );
         MigratedEntities migratedEntities = migratedEntitiesTuple.v2();
-        assertThat(migratedEntities.removedIndexTemplateName, is("catch-all"));
-        assertThat(migratedEntities.migratedPolicies.size(), is(1));
-        assertThat(migratedEntities.migratedPolicies.get(0), is(lifecycleName));
-        assertThat(migratedEntities.migratedIndices.size(), is(2));
-        assertThat(migratedEntities.migratedIndices, hasItems("indexWithWarmDataAttribute", "indexWithUnknownDataAttribute"));
+        assertThat(migratedEntities.removedIndexTemplateName(), is("catch-all"));
+        assertThat(migratedEntities.migratedPolicies().size(), is(1));
+        assertThat(migratedEntities.migratedPolicies().get(0), is(lifecycleName));
+        assertThat(migratedEntities.migratedIndices().size(), is(2));
+        assertThat(migratedEntities.migratedIndices(), hasItems("indexWithWarmDataAttribute", "indexWithUnknownDataAttribute"));
         ClusterState newState = migratedEntitiesTuple.v1();
         assertThat(newState.metadata().getTemplates().size(), is(1));
@@ -1105,11 +1083,11 @@ public void testMigrateToDataTiersRouting() {
         );
         MigratedEntities migratedEntities = migratedEntitiesTuple.v2();
-        assertThat(migratedEntities.removedIndexTemplateName, nullValue());
-        assertThat(migratedEntities.migratedPolicies.size(), is(1));
-        assertThat(migratedEntities.migratedPolicies.get(0), is(lifecycleName));
-        assertThat(migratedEntities.migratedIndices.size(), is(2));
-        assertThat(migratedEntities.migratedIndices, hasItems("indexWithWarmDataAttribute", "indexWithUnknownDataAttribute"));
+        assertThat(migratedEntities.removedIndexTemplateName(), nullValue());
+        assertThat(migratedEntities.migratedPolicies().size(), is(1));
+        assertThat(migratedEntities.migratedPolicies().get(0), is(lifecycleName));
+        assertThat(migratedEntities.migratedIndices().size(), is(2));
+        assertThat(migratedEntities.migratedIndices(), hasItems("indexWithWarmDataAttribute", "indexWithUnknownDataAttribute"));
         ClusterState newState = migratedEntitiesTuple.v1();
         assertThat(newState.metadata().getTemplates().size(), is(2));
@@ -1130,10 +1108,10 @@ public void testMigrateToDataTiersRouting() {
         );
         MigratedEntities migratedEntities = migratedEntitiesTuple.v2();
-        assertThat(migratedEntities.migratedPolicies.size(), is(1));
-        assertThat(migratedEntities.migratedPolicies.get(0), is(lifecycleName));
-        assertThat(migratedEntities.migratedIndices.size(), is(2));
-        assertThat(migratedEntities.migratedIndices, hasItems("indexWithWarmDataAttribute", "indexWithUnknownDataAttribute"));
+        assertThat(migratedEntities.migratedPolicies().size(), is(1));
+        assertThat(migratedEntities.migratedPolicies().get(0), is(lifecycleName));
+        assertThat(migratedEntities.migratedIndices().size(), is(2));
+        assertThat(migratedEntities.migratedIndices(), hasItems("indexWithWarmDataAttribute", "indexWithUnknownDataAttribute"));
         IndexMetadata migratedIndex;
         migratedIndex = migratedEntitiesTuple.v1().metadata().index("indexWithWarmDataAttribute");
@@ -1185,9 +1163,9 @@ public void testMigrateToDataTiersRoutingRequiresILMStopped() {
                 null,
                 false
             );
-            assertThat(migratedState.v2().migratedIndices, empty());
-            assertThat(migratedState.v2().migratedPolicies, empty());
-            assertThat(migratedState.v2().removedIndexTemplateName, nullValue());
+            assertThat(migratedState.v2().migratedIndices(), empty());
+            assertThat(migratedState.v2().migratedPolicies(), empty());
+            assertThat(migratedState.v2().removedIndexTemplateName(), nullValue());
         }
     }
@@ -1215,7 +1193,7 @@ public void testDryRunDoesntRequireILMStopped() {
     public void testMigrationDoesNotRemoveComposableTemplates() {
         ComposableIndexTemplate composableIndexTemplate = ComposableIndexTemplate.builder()
-            .indexPatterns(Collections.singletonList("*"))
+            .indexPatterns(List.of("*"))
             .template(new Template(Settings.builder().put(DATA_ROUTING_REQUIRE_SETTING, "hot").build(), null, null))
             .build();
@@ -1232,7 +1210,7 @@ public void testMigrationDoesNotRemoveComposableTemplates() {
             null,
             false
         );
-        assertThat(migratedEntitiesTuple.v2().removedIndexTemplateName, nullValue());
+        assertThat(migratedEntitiesTuple.v2().removedIndexTemplateName(), nullValue());
         // the composable template still exists, however it was migrated to not use the custom require.data routing setting
         assertThat(migratedEntitiesTuple.v1().metadata().templatesV2().get(composableTemplateName), is(notNullValue()));
     }
@@ -1285,7 +1263,7 @@ private LifecyclePolicyMetadata getWarmColdPolicyMeta(
                 new Phase("cold", TimeValue.ZERO, Map.of(coldAllocateAction.getWriteableName(), coldAllocateAction))
             )
         );
-        return new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong());
+        return new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong());
     }
     public void testMigrateLegacyIndexTemplates() {
@@ -1676,9 +1654,9 @@ public void testMigrateIndexAndComponentTemplates() {
         Metadata.Builder mb = Metadata.builder(clusterState.metadata());
         MetadataMigrateToDataTiersRoutingService.MigratedTemplates migratedTemplates = MetadataMigrateToDataTiersRoutingService
            .migrateIndexAndComponentTemplates(mb, clusterState, nodeAttrName);
-        assertThat(migratedTemplates.migratedLegacyTemplates, is(List.of("template-with-require-routing")));
-        assertThat(migratedTemplates.migratedComposableTemplates, is(List.of("composable-template-with-require-routing")));
-        assertThat(migratedTemplates.migratedComponentTemplates, is(List.of("component-with-require-and-include-routing")));
+        assertThat(migratedTemplates.migratedLegacyTemplates(), is(List.of("template-with-require-routing")));
+        assertThat(migratedTemplates.migratedComposableTemplates(), is(List.of("composable-template-with-require-routing")));
+        assertThat(migratedTemplates.migratedComponentTemplates(), is(List.of("component-with-require-and-include-routing")));
     }
     private String getWarmPhaseDef() {
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java
index b3146e81d08fc..06d11bff069fd 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java
@@ -42,9 +42,8 @@
 import org.mockito.Mockito;
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
@@ -91,42 +90,33 @@ public void prepareState() throws IOException {
         Phase mixedPhase = new Phase(
             "first_phase",
             TimeValue.ZERO,
-            Collections.singletonMap(MockAction.NAME, new MockAction(Arrays.asList(firstStep, secondStep, thirdStep)))
+            Map.of(MockAction.NAME, new MockAction(List.of(firstStep, secondStep, thirdStep)))
         );
         Phase allClusterPhase = new Phase(
             "first_phase",
             TimeValue.ZERO,
-            Collections.singletonMap(MockAction.NAME, new MockAction(Arrays.asList(firstStep, allClusterSecondStep)))
+            Map.of(MockAction.NAME, new MockAction(List.of(firstStep, allClusterSecondStep)))
         );
         Phase invalidPhase = new Phase(
             "invalid_phase",
             TimeValue.ZERO,
-            Collections.singletonMap(
-                MockAction.NAME,
-                new MockAction(Arrays.asList(new MockClusterStateActionStep(firstStepKey, invalidStepKey)))
-            )
-        );
-        LifecyclePolicy mixedPolicy = newTestLifecyclePolicy(mixedPolicyName, Collections.singletonMap(mixedPhase.getName(), mixedPhase));
-        LifecyclePolicy allClusterPolicy = newTestLifecyclePolicy(
-            allClusterPolicyName,
-            Collections.singletonMap(allClusterPhase.getName(), allClusterPhase)
-        );
-        LifecyclePolicy invalidPolicy = newTestLifecyclePolicy(
-            invalidPolicyName,
-            Collections.singletonMap(invalidPhase.getName(), invalidPhase)
+            Map.of(MockAction.NAME, new MockAction(List.of(new MockClusterStateActionStep(firstStepKey, invalidStepKey))))
         );
+        LifecyclePolicy mixedPolicy = newTestLifecyclePolicy(mixedPolicyName, Map.of(mixedPhase.getName(), mixedPhase));
+        LifecyclePolicy allClusterPolicy = newTestLifecyclePolicy(allClusterPolicyName, Map.of(allClusterPhase.getName(), allClusterPhase));
+        LifecyclePolicy invalidPolicy = newTestLifecyclePolicy(invalidPolicyName, Map.of(invalidPhase.getName(), invalidPhase));
         Map<String, LifecyclePolicyMetadata> policyMap = new HashMap<>();
         policyMap.put(
             mixedPolicyName,
-            new LifecyclePolicyMetadata(mixedPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
+            new LifecyclePolicyMetadata(mixedPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())
         );
         policyMap.put(
             allClusterPolicyName,
-            new LifecyclePolicyMetadata(allClusterPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
+            new LifecyclePolicyMetadata(allClusterPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())
         );
         policyMap.put(
             invalidPolicyName,
-            new LifecyclePolicyMetadata(invalidPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
+            new LifecyclePolicyMetadata(invalidPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())
         );
         policyStepsRegistry = new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, client, null);
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java
index 9e2a67caac253..7a37aaba96c18 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java
@@ -36,7 +36,6 @@
 import org.elasticsearch.xpack.core.ilm.LifecycleSettings;
 import java.io.IOException;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -177,7 +176,7 @@ public void testIsYellowWhenNotRunningAndPoliciesConfigured() {
                 YELLOW,
                 "Index Lifecycle Management is not running",
                 new SimpleHealthIndicatorDetails(Map.of("ilm_status", status, "policies", 1, "stagnating_indices", 0)),
-                Collections.singletonList(
+                List.of(
                     new HealthIndicatorImpact(
                         NAME,
                         IlmHealthIndicatorService.AUTOMATION_DISABLED_IMPACT_ID,
@@ -251,7 +250,7 @@ public void testSkippingFieldsWhenVerboseIsFalse() {
                 YELLOW,
                 "Index Lifecycle Management is not running",
                 HealthIndicatorDetails.EMPTY,
-                Collections.singletonList(
+                List.of(
                     new HealthIndicatorImpact(
                         NAME,
                         IlmHealthIndicatorService.AUTOMATION_DISABLED_IMPACT_ID,
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java
index d81faf6a398d7..4e8d7440eb773 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java
@@ -33,7 +33,6 @@
 import org.mockito.Mockito;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -75,18 +74,18 @@ public void testUsageStats() throws Exception {
         indexPolicies.put("index_3", policy1Name);
         indexPolicies.put("index_4", policy1Name);
         indexPolicies.put("index_5", policy3Name);
-        LifecyclePolicy policy1 = new LifecyclePolicy(policy1Name, Collections.emptyMap());
+        LifecyclePolicy policy1 = new LifecyclePolicy(policy1Name, Map.of());
         policies.add(policy1);
-        PolicyStats policy1Stats = new PolicyStats(Collections.emptyMap(), 4);
+        PolicyStats policy1Stats = new PolicyStats(Map.of(), 4);
         Map<String, Phase> phases1 = new HashMap<>();
         LifecyclePolicy policy2 = new LifecyclePolicy(policy2Name, phases1);
         policies.add(policy2);
-        PolicyStats policy2Stats = new PolicyStats(Collections.emptyMap(), 0);
+        PolicyStats policy2Stats = new PolicyStats(Map.of(), 0);
-        LifecyclePolicy policy3 = new LifecyclePolicy(policy3Name, Collections.emptyMap());
+        LifecyclePolicy policy3 = new LifecyclePolicy(policy3Name, Map.of());
         policies.add(policy3);
-        PolicyStats policy3Stats = new PolicyStats(Collections.emptyMap(), 1);
+        PolicyStats policy3Stats = new PolicyStats(Map.of(), 1);
         ClusterState clusterState = buildClusterState(policies, indexPolicies);
         Mockito.when(clusterService.state()).thenReturn(clusterState);
@@ -110,7 +109,7 @@ private ClusterState buildClusterState(List<LifecyclePolicy> lifecyclePolicies, Map<String, String> indexPolicies) {
         Map<String, LifecyclePolicyMetadata> lifecyclePolicyMetadatasMap = lifecyclePolicies.stream()
-            .map(p -> new LifecyclePolicyMetadata(p, Collections.emptyMap(), 1, 0L))
+            .map(p -> new LifecyclePolicyMetadata(p, Map.of(), 1, 0L))
             .collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity()));
         IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(lifecyclePolicyMetadatasMap, OperationMode.RUNNING);
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java
index e757488c2690e..ece83fe6bc437 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java
@@ -44,8 +44,6 @@
 import org.elasticsearch.xpack.core.ilm.WaitForSnapshotAction;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.SortedMap;
@@ -63,10 +61,7 @@ protected IndexLifecycleMetadata createTestInstance() {
         Map<String, LifecyclePolicyMetadata> policies = Maps.newMapWithExpectedSize(numPolicies);
         for (int i = 0; i < numPolicies; i++) {
             LifecyclePolicy policy = randomTimeseriesLifecyclePolicy(randomAlphaOfLength(4) + i);
-            policies.put(
-                policy.getName(),
-                new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-            );
+            policies.put(policy.getName(), new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         }
         return new IndexLifecycleMetadata(policies, randomFrom(OperationMode.values()));
     }
@@ -84,7 +79,7 @@ protected Reader instanceReader() {
     @Override
     protected NamedWriteableRegistry getNamedWriteableRegistry() {
         return new NamedWriteableRegistry(
-            Arrays.asList(
+            List.of(
                 new NamedWriteableRegistry.Entry(
                     LifecycleType.class,
                     TimeseriesLifecycleType.TYPE,
@@ -111,7 +106,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() {
     protected NamedXContentRegistry xContentRegistry() {
         List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
         entries.addAll(
-            Arrays.asList(
+            List.of(
                 new NamedXContentRegistry.Entry(
                     LifecycleType.class,
                     new ParseField(TimeseriesLifecycleType.TYPE),
@@ -155,7 +150,7 @@ protected Metadata.Custom mutateInstance(Custom instance) {
                 policyName,
                 new LifecyclePolicyMetadata(
                     randomTimeseriesLifecyclePolicy(policyName),
-                    Collections.emptyMap(),
+                    Map.of(),
                     randomNonNegativeLong(),
                     randomNonNegativeLong()
                 )
@@ -192,9 +187,9 @@ public static IndexLifecycleMetadata createTestInstance(int numPolicies, Operati
             Map<String, Phase> phases = Maps.newMapWithExpectedSize(numberPhases);
             for (int j = 0; j < numberPhases; j++) {
                 TimeValue after = randomTimeValue(0, 1_000_000_000, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS);
-                Map<String, LifecycleAction> actions = Collections.emptyMap();
+                Map<String, LifecycleAction> actions = Map.of();
                 if (randomBoolean()) {
-                    actions = Collections.singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE);
+                    actions = Map.of(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE);
                 }
                 String phaseName = randomAlphaOfLength(10);
                 phases.put(phaseName, new Phase(phaseName, after, actions));
@@ -204,7 +199,7 @@ public static IndexLifecycleMetadata createTestInstance(int numPolicies, Operati
                 policyName,
                 new LifecyclePolicyMetadata(
                     newTestLifecyclePolicy(policyName, phases),
-                    Collections.emptyMap(),
+                    Map.of(),
                     randomNonNegativeLong(),
                     randomNonNegativeLong()
                 )
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java
index 8a4859fcd8b77..374f10b604f18 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java
@@ -73,8 +73,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -89,7 +87,6 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.BiFunction;
-import static java.util.stream.Collectors.toList;
 import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch;
 import static org.elasticsearch.xpack.core.ilm.LifecycleSettings.LIFECYCLE_HISTORY_INDEX_ENABLED_SETTING;
@@ -248,7 +245,7 @@ public void testRunPolicyErrorStepOnRetryableFailedStep() {
         List<Step> waitForRolloverStepList = action.toSteps(client, phaseName, null)
             .stream()
             .filter(s -> s.getKey().name().equals(WaitForRolloverReadyStep.NAME))
-            .collect(toList());
+            .toList();
         assertThat(waitForRolloverStepList.size(), is(1));
         Step waitForRolloverStep = waitForRolloverStepList.get(0);
         StepKey stepKey = waitForRolloverStep.getKey();
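On the .collect(toList()) to .toList() change just above: Stream.toList() (Java 16+) needs no static import and returns an unmodifiable list, whereas Collectors.toList() in practice returns a mutable one, so the swap is only safe where the caller never mutates the result, as is the case for this read-only assertion list. A quick contrast:

    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class StreamToListDemo {
        public static void main(String[] args) {
            List<String> mutable = Stream.of("wait-for-rollover", "attempt-rollover")
                .collect(Collectors.toList());
            mutable.add("third-step"); // fine: Collectors.toList() is mutable in practice

            List<String> unmodifiable = Stream.of("wait-for-rollover", "attempt-rollover").toList();
            try {
                unmodifiable.add("third-step");
            } catch (UnsupportedOperationException e) {
                System.out.println("Stream.toList() is unmodifiable");
            }
        }
    }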
@@ -288,7 +285,7 @@ public void testRunStateChangePolicyWithNoNextStep() throws Exception {
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -317,7 +314,7 @@ public void testRunStateChangePolicyWithNextStep() throws Exception {
         StepKey nextStepKey = new StepKey("phase", "action", "next_cluster_state_action_step");
         MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey);
         MockClusterStateActionStep nextStep = new MockClusterStateActionStep(nextStepKey, null);
-        MockPolicyStepsRegistry stepRegistry = createMultiStepPolicyStepRegistry(policyName, Arrays.asList(step, nextStep));
+        MockPolicyStepsRegistry stepRegistry = createMultiStepPolicyStepRegistry(policyName, List.of(step, nextStep));
         stepRegistry.setResolver((i, k) -> {
             if (stepKey.equals(k)) {
                 return step;
@@ -340,7 +337,7 @@ public void testRunStateChangePolicyWithNextStep() throws Exception {
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -427,7 +424,7 @@ public void doTestRunPolicyWithFailureToReadPolicy(boolean asyncAction, boolean
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -476,7 +473,7 @@ public void testRunAsyncActionDoesNotRun() {
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -503,7 +500,7 @@ public void testRunStateChangePolicyWithAsyncActionNextStep() throws Exception {
         StepKey nextStepKey = new StepKey("phase", "action", "async_action_step");
         MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey);
         MockAsyncActionStep nextStep = new MockAsyncActionStep(nextStepKey, null);
-        MockPolicyStepsRegistry stepRegistry = createMultiStepPolicyStepRegistry(policyName, Arrays.asList(step, nextStep));
+        MockPolicyStepsRegistry stepRegistry = createMultiStepPolicyStepRegistry(policyName, List.of(step, nextStep));
         stepRegistry.setResolver((i, k) -> {
             if (stepKey.equals(k)) {
                 return step;
@@ -526,7 +523,7 @@ public void testRunStateChangePolicyWithAsyncActionNextStep() throws Exception {
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -603,7 +600,7 @@ public void testRunPeriodicStep() throws Exception {
             .build();
         ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         DiscoveryNode node = clusterService.localNode();
-        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
         ClusterState state = ClusterState.builder(new ClusterName("cluster"))
             .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm))
             .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
@@ -785,7 +782,7 @@ public void testGetCurrentStep() {
         Client client = mock(Client.class);
         when(client.settings()).thenReturn(Settings.EMPTY);
         LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases(policyName);
-        LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong());
+        LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong());
         String phaseName = randomFrom(policy.getPhases().keySet());
         Phase phase = policy.getPhases().get(phaseName);
         PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong());
@@ -824,7 +821,7 @@ public void testIsReadyToTransition() {
         StepKey stepKey = new StepKey("phase", MockAction.NAME, MockAction.NAME);
         MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null);
         SortedMap<String, LifecyclePolicyMetadata> lifecyclePolicyMap = new TreeMap<>(
-            Collections.singletonMap(
+            Map.of(
                 policyName,
                 new LifecyclePolicyMetadata(
                     createPolicy(policyName, null, step.getKey()),
@@ -834,9 +831,9 @@ public void testIsReadyToTransition() {
                 )
             )
         );
-        Map<String, Step> firstStepMap = Collections.singletonMap(policyName, step);
-        Map<StepKey, Step> policySteps = Collections.singletonMap(step.getKey(), step);
-        Map<String, Map<StepKey, Step>> stepMap = Collections.singletonMap(policyName, policySteps);
+        Map<String, Step> firstStepMap = Map.of(policyName, step);
+        Map<StepKey, Step> policySteps = Map.of(step.getKey(), step);
+        Map<String, Map<StepKey, Step>> stepMap = Map.of(policyName, policySteps);
         PolicyStepsRegistry policyStepsRegistry = new PolicyStepsRegistry(
             lifecyclePolicyMap,
             firstStepMap,
@@ -897,7 +894,7 @@ private static LifecyclePolicy createPolicy(String policyName, StepKey safeStep,
         assert unsafeStep == null || safeStep.phase().equals(unsafeStep.phase()) == false
             : "safe and unsafe actions must be in different phases";
         Map<String, LifecycleAction> actions = new HashMap<>();
-        List<Step> steps = Collections.singletonList(new MockStep(safeStep, null));
+        List<Step> steps = List.of(new MockStep(safeStep, null));
         MockAction safeAction = new MockAction(steps, true);
         actions.put(safeAction.getWriteableName(), safeAction);
         Phase phase = new Phase(safeStep.phase(), TimeValue.timeValueMillis(0), actions);
@@ -906,7 +903,7 @@ private static LifecyclePolicy createPolicy(String policyName, StepKey safeStep,
         if (unsafeStep != null) {
             assert MockAction.NAME.equals(unsafeStep.action()) : "The unsafe action needs to be MockAction.NAME";
             Map<String, LifecycleAction> actions = new HashMap<>();
-            List<Step> steps = Collections.singletonList(new MockStep(unsafeStep, null));
+            List<Step> steps = List.of(new MockStep(unsafeStep, null));
             MockAction unsafeAction = new MockAction(steps, false);
             actions.put(unsafeAction.getWriteableName(), unsafeAction);
             Phase phase = new Phase(unsafeStep.phase(), TimeValue.timeValueMillis(0), actions);
@@ -1233,7 +1230,7 @@ public Step getStep(IndexMetadata indexMetadata, StepKey stepKey) {
     }
     public static MockPolicyStepsRegistry createOneStepPolicyStepRegistry(String policyName, Step step) {
-        return createMultiStepPolicyStepRegistry(policyName, Collections.singletonList(step));
+        return createMultiStepPolicyStepRegistry(policyName, List.of(step));
     }
     public static MockPolicyStepsRegistry createMultiStepPolicyStepRegistry(String policyName, List<Step> steps) {
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java
index eceb81542377a..b77e643bc2853 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java
@@ -58,9 +58,9 @@
 import java.time.Clock;
 import java.time.Instant;
 import java.time.ZoneId;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.UUID;
@@ -114,7 +114,7 @@ public void prepareServices() {
         }).when(executorService).execute(any());
         Settings settings = Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s").build();
         when(clusterService.getClusterSettings()).thenReturn(
-            new ClusterSettings(settings, Collections.singleton(LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING))
+            new ClusterSettings(settings, Set.of(LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING))
         );
         when(clusterService.lifecycleState()).thenReturn(State.STARTED);
@@ -154,14 +154,11 @@ public void testStoppedModeSkip() {
             randomStepKey(),
             randomStepKey()
         );
-        MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
-        Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
-        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
+        MockAction mockAction = new MockAction(List.of(mockStep));
+        Phase phase = new Phase("phase", TimeValue.ZERO, Map.of("action", mockAction));
+        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Map.of(phase.getName(), phase));
         SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
-        policyMap.put(
-            policyName,
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         IndexMetadata indexMetadata = IndexMetadata.builder(index.getName())
             .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
@@ -191,14 +188,11 @@ public void testRequestedStopOnShrink() {
             mockShrinkStep,
             randomStepKey()
         );
-        MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
-        Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
-        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
+        MockAction mockAction = new MockAction(List.of(mockStep));
+        Phase phase = new Phase("phase", TimeValue.ZERO, Map.of("action", mockAction));
+        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Map.of(phase.getName(), phase));
         SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
-        policyMap.put(
-            policyName,
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
         lifecycleState.setPhase(mockShrinkStep.phase());
@@ -250,14 +244,11 @@ private void verifyCanStopWithStep(String stoppableStep) {
             mockShrinkStep,
             randomStepKey()
         );
-        MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
-        Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
-        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
+        MockAction mockAction = new MockAction(List.of(mockStep));
+        Phase phase = new Phase("phase", TimeValue.ZERO, Map.of("action", mockAction));
+        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Map.of(phase.getName(), phase));
         SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
-        policyMap.put(
-            policyName,
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
         lifecycleState.setPhase(mockShrinkStep.phase());
@@ -301,14 +292,11 @@ public void testRequestedStopOnSafeAction() {
             currentStepKey,
             randomStepKey()
         );
-        MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
-        Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
-        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
+        MockAction mockAction = new MockAction(List.of(mockStep));
+        Phase phase = new Phase("phase", TimeValue.ZERO, Map.of("action", mockAction));
+        LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Map.of(phase.getName(), phase));
         SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
-        policyMap.put(
-            policyName,
-            new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
         lifecycleState.setPhase(currentStepKey.phase());
@@ -370,9 +358,9 @@ public void doTestExceptionStillProcessesOtherIndices(boolean useOnMaster) {
         } else {
             i1mockStep = new IndexLifecycleRunnerTests.MockClusterStateActionStep(i1currentStepKey, randomStepKey());
         }
-        MockAction i1mockAction = new MockAction(Collections.singletonList(i1mockStep));
-        Phase i1phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", i1mockAction));
-        LifecyclePolicy i1policy = newTestLifecyclePolicy(policy1, Collections.singletonMap(i1phase.getName(), i1phase));
+        MockAction i1mockAction = new MockAction(List.of(i1mockStep));
+        Phase i1phase = new Phase("phase", TimeValue.ZERO, Map.of("action", i1mockAction));
+        LifecyclePolicy i1policy = newTestLifecyclePolicy(policy1, Map.of(i1phase.getName(), i1phase));
         Index index1 = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         LifecycleExecutionState.Builder i1lifecycleState = LifecycleExecutionState.builder();
         i1lifecycleState.setPhase(i1currentStepKey.phase());
@@ -387,9 +375,9 @@ public void doTestExceptionStillProcessesOtherIndices(boolean useOnMaster) {
         } else {
             i2mockStep = new IndexLifecycleRunnerTests.MockClusterStateActionStep(i2currentStepKey, randomStepKey());
         }
-        MockAction mockAction = new MockAction(Collections.singletonList(i2mockStep));
-        Phase i2phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
-        LifecyclePolicy i2policy = newTestLifecyclePolicy(policy1, Collections.singletonMap(i2phase.getName(), i1phase));
+        MockAction mockAction = new MockAction(List.of(i2mockStep));
+        Phase i2phase = new Phase("phase", TimeValue.ZERO, Map.of("action", mockAction));
+        LifecyclePolicy i2policy = newTestLifecyclePolicy(policy1, Map.of(i2phase.getName(), i1phase));
         Index index2 = new Index(
             randomValueOtherThan(index1.getName(), () -> randomAlphaOfLengthBetween(1, 20)),
             randomAlphaOfLengthBetween(1, 20)
@@ -422,14 +410,8 @@ public void doTestExceptionStillProcessesOtherIndices(boolean useOnMaster) {
         }
         SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
-        policyMap.put(
-            policy1,
-            new LifecyclePolicyMetadata(i1policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
-        policyMap.put(
-            policy2,
-            new LifecyclePolicyMetadata(i2policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())
-        );
+        policyMap.put(policy1, new LifecyclePolicyMetadata(i1policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
+        policyMap.put(policy2, new LifecyclePolicyMetadata(i2policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()));
         IndexMetadata i1indexMetadata = IndexMetadata.builder(index1.getName())
             .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policy1))
@@ -533,14 +515,8 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() {
             SingleNodeShutdownMetadata.Type.REPLACE
         )) {
             ClusterState state = ClusterState.builder(ClusterName.DEFAULT).build();
-            assertThat(
-                IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"),
-                equalTo(Collections.emptySet())
-            );
-            assertThat(
-                IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"),
-                equalTo(Collections.emptySet())
-            );
+            assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"), equalTo(Set.of()));
+            assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), equalTo(Set.of()));
             IndexMetadata nonDangerousIndex = IndexMetadata.builder("no_danger")
                 .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy"))
@@ -583,7 +559,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() {
             Map<String, IndexMetadata> indices = Map.of("no_danger", nonDangerousIndex, "danger", dangerousIndex);
             Metadata metadata = Metadata.builder()
-                .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING))
+                .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING))
                 .indices(indices)
                 .persistentSettings(settings(IndexVersion.current()).build())
                 .build();
@@ -612,14 +588,8 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() {
.build(); // No danger yet, because no node is shutting down - assertThat( - IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"), - equalTo(Collections.emptySet()) - ); - assertThat( - IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), - equalTo(Collections.emptySet()) - ); + assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"), equalTo(Set.of())); + assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), equalTo(Set.of())); state = ClusterState.builder(state) .metadata( @@ -627,7 +597,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { .putCustom( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( - Collections.singletonMap( + Map.of( "shutdown_node", SingleNodeShutdownMetadata.builder() .setNodeId("shutdown_node") @@ -642,15 +612,12 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { ) .build(); - assertThat( - IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"), - equalTo(Collections.emptySet()) - ); + assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "regular_node"), equalTo(Set.of())); // No danger, because this is a "RESTART" type shutdown assertThat( "restart type shutdowns are not considered dangerous", IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), - equalTo(Collections.emptySet()) + equalTo(Set.of()) ); final String targetNodeName = type == SingleNodeShutdownMetadata.Type.REPLACE ? randomAlphaOfLengthBetween(10, 20) : null; @@ -661,7 +628,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { .putCustom( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( - Collections.singletonMap( + Map.of( "shutdown_node", SingleNodeShutdownMetadata.builder() .setNodeId("shutdown_node") @@ -679,10 +646,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { .build(); // The dangerous index should be calculated as being in danger now - assertThat( - IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), - equalTo(Collections.singleton("danger")) - ); + assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(state, "shutdown_node"), equalTo(Set.of("danger"))); } } } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java index 37d586240eb7a..a1f51f1fae90f 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java @@ -48,7 +48,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -72,7 +71,7 @@ public class IndexLifecycleTransitionTests extends ESTestCase { public void testMoveClusterStateToNextStep() { String indexName = "my_index"; LifecyclePolicy policy = randomValueOtherThanMany( - p -> p.getPhases().size() == 0, + p -> p.getPhases().isEmpty(), () -> LifecyclePolicyTests.randomTestLifecyclePolicy("policy") ); Phase nextPhase = policy.getPhases() @@ -80,8 +79,8 @@ public void testMoveClusterStateToNextStep() { .stream() .findFirst() .orElseThrow(() -> new AssertionError("expected next phase to be present")); - List 
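// Aside (illustrative sketch, not part of the patch): the assertions above compare results against
// Set.of() and Set.of("danger") instead of Collections.emptySet()/singleton(). This is safe because
// java.util.Set equality is defined purely by membership, so the concrete implementation on either
// side of equalTo(...) does not matter:
import java.util.Collections;
import java.util.Set;

class SetEqualityDemo {
    public static void main(String[] args) {
        System.out.println(Set.of().equals(Collections.emptySet()));                  // true
        System.out.println(Set.of("danger").equals(Collections.singleton("danger"))); // true
    }
}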
policyMetadatas = Collections.singletonList( - new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) + List policyMetadatas = List.of( + new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()) ); Step.StepKey currentStep = new Step.StepKey("current_phase", "current_action", "current_step"); Step.StepKey nextStep = new Step.StepKey(nextPhase.getName(), "next_action", "next_step"); @@ -125,11 +124,11 @@ public void testMoveClusterStateToNextStep() { public void testMoveClusterStateToNextStepSamePhase() { String indexName = "my_index"; LifecyclePolicy policy = randomValueOtherThanMany( - p -> p.getPhases().size() == 0, + p -> p.getPhases().isEmpty(), () -> LifecyclePolicyTests.randomTestLifecyclePolicy("policy") ); - List policyMetadatas = Collections.singletonList( - new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) + List policyMetadatas = List.of( + new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()) ); Step.StepKey currentStep = new Step.StepKey("current_phase", "current_action", "current_step"); Step.StepKey nextStep = new Step.StepKey("current_phase", "next_action", "next_step"); @@ -176,11 +175,11 @@ public void testMoveClusterStateToNextStepSamePhase() { public void testMoveClusterStateToNextStepSameAction() { String indexName = "my_index"; LifecyclePolicy policy = randomValueOtherThanMany( - p -> p.getPhases().size() == 0, + p -> p.getPhases().isEmpty(), () -> LifecyclePolicyTests.randomTestLifecyclePolicy("policy") ); - List policyMetadatas = Collections.singletonList( - new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) + List policyMetadatas = List.of( + new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()) ); Step.StepKey currentStep = new Step.StepKey("current_phase", "current_action", "current_step"); Step.StepKey nextStep = new Step.StepKey("current_phase", "current_action", "next_step"); @@ -228,7 +227,7 @@ public void testSuccessfulValidatedMoveClusterStateToNextStep() { String indexName = "my_index"; String policyName = "my_policy"; LifecyclePolicy policy = randomValueOtherThanMany( - p -> p.getPhases().size() == 0, + p -> p.getPhases().isEmpty(), () -> LifecyclePolicyTests.randomTestLifecyclePolicy(policyName) ); Phase nextPhase = policy.getPhases() @@ -236,8 +235,8 @@ public void testSuccessfulValidatedMoveClusterStateToNextStep() { .stream() .findFirst() .orElseThrow(() -> new AssertionError("expected next phase to be present")); - List policyMetadatas = Collections.singletonList( - new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) + List policyMetadatas = List.of( + new LifecyclePolicyMetadata(policy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong()) ); Step.StepKey currentStepKey = new Step.StepKey("current_phase", "current_action", "current_step"); Step.StepKey nextStepKey = new Step.StepKey(nextPhase.getName(), "next_action", "next_step"); @@ -279,7 +278,7 @@ public void testValidatedMoveClusterStateToNextStepWithoutPolicy() { lifecycleState.setAction(currentStepKey.action()); lifecycleState.setStep(currentStepKey.name()); - ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + ClusterState clusterState = 
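// Aside (illustrative sketch, not part of the patch): the size() == 0 -> isEmpty() rewrites in the
// randomValueOtherThanMany predicates above are about intent rather than performance; for the
// standard java.util collections both are O(1), but isEmpty() reads as a predicate and avoids the
// magic constant:
import java.util.Map;

class IsEmptyDemo {
    public static void main(String[] args) {
        Map<String, String> phases = Map.of();
        System.out.println(phases.size() == 0); // old style
        System.out.println(phases.isEmpty());   // preferred, same result
    }
}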
buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of()); Index index = clusterState.metadata().index(indexName).getIndex(); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, @@ -303,7 +302,7 @@ public void testValidatedMoveClusterStateToNextStepInvalidNextStep() { lifecycleState.setAction(currentStepKey.action()); lifecycleState.setStep(currentStepKey.name()); - ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of()); Index index = clusterState.metadata().index(indexName).getIndex(); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, @@ -325,7 +324,7 @@ public void testMoveClusterStateToErrorStep() throws IOException { lifecycleState.setPhase(currentStep.phase()); lifecycleState.setAction(currentStep.action()); lifecycleState.setStep(currentStep.name()); - ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); + ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), List.of()); Index index = clusterState.metadata().index(indexName).getIndex(); ClusterState newClusterState = IndexLifecycleTransition.moveClusterStateToErrorStep( @@ -359,7 +358,7 @@ public void testAddStepInfoToClusterState() throws IOException { lifecycleState.setPhase(currentStep.phase()); lifecycleState.setAction(currentStep.action()); lifecycleState.setStep(currentStep.name()); - ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); + ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), List.of()); Index index = clusterState.metadata().index(indexName).getIndex(); ClusterState newClusterState = IndexLifecycleTransition.addStepInfoToClusterState(index, clusterState, stepInfo); assertClusterStateStepInfo(clusterState, index, currentStep, newClusterState, stepInfo); @@ -378,9 +377,7 @@ public void testRemovePolicyForIndex() { lifecycleState.setAction(currentStep.action()); lifecycleState.setStep(currentStep.name()); List policyMetadatas = new ArrayList<>(); - policyMetadatas.add( - new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())); ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); Index index = clusterState.metadata().index(indexName).getIndex(); Index[] indices = new Index[] { index }; @@ -399,7 +396,7 @@ public void testRemovePolicyForIndexNoCurrentPolicy() { indexName, indexSettingsBuilder, LifecycleExecutionState.builder().build(), - Collections.emptyList() + List.of() ); Index index = clusterState.metadata().index(indexName).getIndex(); Index[] indices = new Index[] { index }; @@ -414,7 +411,7 @@ public void testRemovePolicyForIndexNoCurrentPolicy() { public void testRemovePolicyForIndexIndexDoesntExist() { String indexName = randomAlphaOfLength(10); String oldPolicyName = "old_policy"; - LifecyclePolicy oldPolicy = newTestLifecyclePolicy(oldPolicyName, Collections.emptyMap()); + LifecyclePolicy oldPolicy = newTestLifecyclePolicy(oldPolicyName, 
Map.of()); Step.StepKey currentStep = AbstractStepTestCase.randomStepKey(); Settings.Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, oldPolicyName); LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); @@ -422,9 +419,7 @@ public void testRemovePolicyForIndexIndexDoesntExist() { lifecycleState.setAction(currentStep.action()); lifecycleState.setStep(currentStep.name()); List policyMetadatas = new ArrayList<>(); - policyMetadatas.add( - new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())); ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); Index index = new Index("doesnt_exist", "im_not_here"); Index[] indices = new Index[] { index }; @@ -448,9 +443,7 @@ public void testRemovePolicyForIndexIndexInUnsafe() { lifecycleState.setAction(currentStep.action()); lifecycleState.setStep(currentStep.name()); List policyMetadatas = new ArrayList<>(); - policyMetadatas.add( - new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())); ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); Index index = clusterState.metadata().index(indexName).getIndex(); Index[] indices = new Index[] { index }; @@ -475,9 +468,7 @@ public void testRemovePolicyWithIndexingComplete() { lifecycleState.setAction(currentStep.action()); lifecycleState.setStep(currentStep.name()); List policyMetadatas = new ArrayList<>(); - policyMetadatas.add( - new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())); ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); Index index = clusterState.metadata().index(indexName).getIndex(); Index[] indices = new Index[] { index }; @@ -756,7 +747,7 @@ public void testMoveClusterStateToFailedStep() { LifecyclePolicy policy = createPolicy(policyName, failedStepKey, null); LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata( policy, - Collections.emptyMap(), + Map.of(), randomNonNegativeLong(), randomNonNegativeLong() ); @@ -771,12 +762,7 @@ public void testMoveClusterStateToFailedStep() { lifecycleState.setStep(errorStepKey.name()); lifecycleState.setStepTime(now); lifecycleState.setFailedStep(failedStepKey.name()); - ClusterState clusterState = buildClusterState( - indexName, - indexSettingsBuilder, - lifecycleState.build(), - Collections.singletonList(policyMetadata) - ); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of(policyMetadata)); Index index = clusterState.metadata().index(indexName).getIndex(); ClusterState nextClusterState = IndexLifecycleTransition.moveClusterStateToPreviouslyFailedStep( clusterState, @@ -802,7 +788,7 @@ public void testMoveClusterStateToFailedStepWithUnknownStep() { LifecyclePolicy policy = createPolicy(policyName, failedStepKey, null); LifecyclePolicyMetadata policyMetadata = new 
LifecyclePolicyMetadata( policy, - Collections.emptyMap(), + Map.of(), randomNonNegativeLong(), randomNonNegativeLong() ); @@ -817,12 +803,7 @@ public void testMoveClusterStateToFailedStepWithUnknownStep() { lifecycleState.setStep(errorStepKey.name()); lifecycleState.setStepTime(now); lifecycleState.setFailedStep(failedStepKey.name()); - ClusterState clusterState = buildClusterState( - indexName, - indexSettingsBuilder, - lifecycleState.build(), - Collections.singletonList(policyMetadata) - ); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of(policyMetadata)); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> IndexLifecycleTransition.moveClusterStateToPreviouslyFailedStep(clusterState, indexName, () -> now, policyRegistry, false) @@ -840,7 +821,7 @@ public void testMoveClusterStateToFailedStepIndexNotFound() { existingIndexName, Settings.builder(), LifecycleExecutionState.builder().build(), - Collections.emptyList() + List.of() ); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, @@ -863,7 +844,7 @@ public void testMoveClusterStateToFailedStepInvalidPolicySetting() { lifecycleState.setAction(errorStepKey.action()); lifecycleState.setStep(errorStepKey.name()); lifecycleState.setFailedStep(failedStepKey.name()); - ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of()); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> IndexLifecycleTransition.moveClusterStateToPreviouslyFailedStep(clusterState, indexName, () -> now, policyRegistry, false) @@ -883,7 +864,7 @@ public void testMoveClusterStateToFailedNotOnError() { lifecycleState.setPhase(failedStepKey.phase()); lifecycleState.setAction(failedStepKey.action()); lifecycleState.setStep(failedStepKey.name()); - ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of()); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> IndexLifecycleTransition.moveClusterStateToPreviouslyFailedStep(clusterState, indexName, () -> now, policyRegistry, false) @@ -906,7 +887,7 @@ public void testMoveClusterStateToPreviouslyFailedStepAsAutomaticRetryAndSetsPre LifecyclePolicy policy = createPolicy(policyName, failedStepKey, null); LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata( policy, - Collections.emptyMap(), + Map.of(), randomNonNegativeLong(), randomNonNegativeLong() ); @@ -923,12 +904,7 @@ public void testMoveClusterStateToPreviouslyFailedStepAsAutomaticRetryAndSetsPre lifecycleState.setFailedStep(failedStepKey.name()); String initialStepInfo = randomAlphaOfLengthBetween(10, 50); lifecycleState.setStepInfo(initialStepInfo); - ClusterState clusterState = buildClusterState( - indexName, - indexSettingsBuilder, - lifecycleState.build(), - Collections.singletonList(policyMetadata) - ); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), List.of(policyMetadata)); Index index = clusterState.metadata().index(indexName).getIndex(); ClusterState nextClusterState = 
IndexLifecycleTransition.moveClusterStateToPreviouslyFailedStep( clusterState, @@ -976,13 +952,11 @@ public void testMoveToFailedStepDoesntRefreshCachedPhaseWhenUnsafe() { Map actions = new HashMap<>(); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy currentPolicy = new LifecyclePolicy("my-policy", phases); List policyMetadatas = new ArrayList<>(); - policyMetadatas.add( - new LifecyclePolicyMetadata(currentPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ); + policyMetadatas.add(new LifecyclePolicyMetadata(currentPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())); Step.StepKey errorStepKey = new Step.StepKey("hot", RolloverAction.NAME, ErrorStep.NAME); PolicyStepsRegistry stepsRegistry = createOneStepPolicyStepRegistry("my-policy", new ErrorStep(errorStepKey)); @@ -1040,9 +1014,9 @@ public void testRefreshPhaseJson() throws IOException { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy newPolicy = new LifecyclePolicy("my-policy", phases); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), 2L, 2L); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(newPolicy, Map.of(), 2L, 2L); ClusterState existingState = ClusterState.builder(ClusterState.EMPTY_STATE) .metadata(Metadata.builder(Metadata.EMPTY_METADATA).put(meta, false).build()) @@ -1185,7 +1159,7 @@ public void testMoveStateToNextActionAndUpdateCachedPhase() { actions.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); actions.put("set_priority", new SetPriorityAction(100)); Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); - Map phases = Collections.singletonMap("hot", hotPhase); + Map phases = Map.of("hot", hotPhase); LifecyclePolicy currentPolicy = new LifecyclePolicy("my-policy", phases); { @@ -1195,10 +1169,10 @@ public void testMoveStateToNextActionAndUpdateCachedPhase() { Map actionsWithoutRollover = new HashMap<>(); actionsWithoutRollover.put("set_priority", new SetPriorityAction(100)); Phase hotPhaseNoRollover = new Phase("hot", TimeValue.ZERO, actionsWithoutRollover); - Map phasesNoRollover = Collections.singletonMap("hot", hotPhaseNoRollover); + Map phasesNoRollover = Map.of("hot", hotPhaseNoRollover); LifecyclePolicyMetadata updatedPolicyMetadata = new LifecyclePolicyMetadata( new LifecyclePolicy("my-policy", phasesNoRollover), - Collections.emptyMap(), + Map.of(), 2L, 2L ); @@ -1233,10 +1207,10 @@ public void testMoveStateToNextActionAndUpdateCachedPhase() { Map actionsWitoutSetPriority = new HashMap<>(); actionsWitoutSetPriority.put("rollover", new RolloverAction(null, null, null, 1L, null, null, null, null, null, null)); Phase hotPhaseNoSetPriority = new Phase("hot", TimeValue.ZERO, actionsWitoutSetPriority); - Map phasesWithoutSetPriority = Collections.singletonMap("hot", hotPhaseNoSetPriority); + Map phasesWithoutSetPriority = Map.of("hot", hotPhaseNoSetPriority); LifecyclePolicyMetadata updatedPolicyMetadata = new LifecyclePolicyMetadata( new LifecyclePolicy("my-policy", 
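// Aside (illustrative sketch, not part of the patch): note the behavioural fix at the end of the
// hunk above in assertClusterStateStepInfo -- the old assertion compared
// newLifecycleState.stepTime() with itself, which passes for any value and so verified nothing.
// The patch repoints the expected side at oldLifecycleState. A self-comparison like this is a
// classic copy-paste bug in tests, as this minimal demo shows:
class TautologyDemo {
    static void assertEquals(Object expected, Object actual) {
        if (!expected.equals(actual)) throw new AssertionError(expected + " != " + actual);
    }

    public static void main(String[] args) {
        long stepTime = System.currentTimeMillis();
        assertEquals(stepTime, stepTime); // always passes, regardless of what produced stepTime
        System.out.println("tautological assertion passed, as it always will");
    }
}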
phasesWithoutSetPriority), - Collections.emptyMap(), + Map.of(), 2L, 2L ); @@ -1275,7 +1249,7 @@ private static LifecyclePolicy createPolicy(String policyName, Step.StepKey safe assert unsafeStep == null || safeStep.phase().equals(unsafeStep.phase()) == false : "safe and unsafe actions must be in different phases"; Map actions = new HashMap<>(); - List steps = Collections.singletonList(new MockStep(safeStep, null)); + List steps = List.of(new MockStep(safeStep, null)); MockAction safeAction = new MockAction(steps, true); actions.put(safeAction.getWriteableName(), safeAction); Phase phase = new Phase(safeStep.phase(), TimeValue.timeValueMillis(0), actions); @@ -1284,7 +1258,7 @@ private static LifecyclePolicy createPolicy(String policyName, Step.StepKey safe if (unsafeStep != null) { assert MockAction.NAME.equals(unsafeStep.action()) : "The unsafe action needs to be MockAction.NAME"; Map actions = new HashMap<>(); - List steps = Collections.singletonList(new MockStep(unsafeStep, null)); + List steps = List.of(new MockStep(unsafeStep, null)); MockAction unsafeAction = new MockAction(steps, false); actions.put(unsafeAction.getWriteableName(), unsafeAction); Phase phase = new Phase(unsafeStep.phase(), TimeValue.timeValueMillis(0), actions); @@ -1436,6 +1410,6 @@ private void assertClusterStateStepInfo( assertEquals(expectedstepInfoValue, newLifecycleState.stepInfo()); assertEquals(oldLifecycleState.phaseTime(), newLifecycleState.phaseTime()); assertEquals(oldLifecycleState.actionTime(), newLifecycleState.actionTime()); - assertEquals(newLifecycleState.stepTime(), newLifecycleState.stepTime()); + assertEquals(oldLifecycleState.stepTime(), newLifecycleState.stepTime()); } } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java index eee3fe3ce53c2..81688ec1503cd 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.xpack.core.ilm.Step.StepKey; import org.junit.Before; -import java.util.Collections; +import java.util.Map; import static org.elasticsearch.cluster.metadata.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; import static org.hamcrest.Matchers.containsString; @@ -53,10 +53,7 @@ public void setupClusterState() { .build(); index = indexMetadata.getIndex(); IndexLifecycleMetadata ilmMeta = new IndexLifecycleMetadata( - Collections.singletonMap( - policy, - new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ), + Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())), OperationMode.RUNNING ); Metadata metadata = Metadata.builder() diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java index f9a8d4a2ab486..554e9a48c625e 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java @@ -29,7 +29,6 @@ import org.junit.Before; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import 
java.util.Locale; import java.util.Map; @@ -67,10 +66,7 @@ public void setupClusterState() { index = indexMetadata.getIndex(); lifecyclePolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policy); IndexLifecycleMetadata ilmMeta = new IndexLifecycleMetadata( - Collections.singletonMap( - policy, - new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ), + Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())), OperationMode.RUNNING ); Metadata metadata = Metadata.builder() @@ -95,7 +91,7 @@ public void testExecuteSuccessfullyMoved() throws Exception { AlwaysExistingStepRegistry stepRegistry = new AlwaysExistingStepRegistry(client); stepRegistry.update( new IndexLifecycleMetadata( - Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), 2L, 2L)), + Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Map.of(), 2L, 2L)), OperationMode.RUNNING ) ); @@ -169,7 +165,7 @@ public void testExecuteSuccessfulMoveWithInvalidNextStep() throws Exception { AlwaysExistingStepRegistry stepRegistry = new AlwaysExistingStepRegistry(client); stepRegistry.update( new IndexLifecycleMetadata( - Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), 2L, 2L)), + Map.of(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Map.of(), 2L, 2L)), OperationMode.RUNNING ) ); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java index 36d537a57382c..f61267d40a513 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java @@ -46,7 +46,6 @@ import org.elasticsearch.xpack.core.ilm.Step; import org.mockito.Mockito; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -76,7 +75,7 @@ private IndexMetadata emptyMetadata(Index index) { public void testGetFirstStep() { String policyName = randomAlphaOfLengthBetween(2, 10); Step expectedFirstStep = new MockStep(MOCK_STEP_KEY, null); - Map firstStepMap = Collections.singletonMap(policyName, expectedFirstStep); + Map firstStepMap = Map.of(policyName, expectedFirstStep); PolicyStepsRegistry registry = new PolicyStepsRegistry(null, firstStepMap, null, NamedXContentRegistry.EMPTY, null, null); Step actualFirstStep = registry.getFirstStep(policyName); assertThat(actualFirstStep, sameInstance(expectedFirstStep)); @@ -85,7 +84,7 @@ public void testGetFirstStep() { public void testGetFirstStepUnknownPolicy() { String policyName = randomAlphaOfLengthBetween(2, 10); Step expectedFirstStep = new MockStep(MOCK_STEP_KEY, null); - Map firstStepMap = Collections.singletonMap(policyName, expectedFirstStep); + Map firstStepMap = Map.of(policyName, expectedFirstStep); PolicyStepsRegistry registry = new PolicyStepsRegistry(null, firstStepMap, null, NamedXContentRegistry.EMPTY, null, null); Step actualFirstStep = registry.getFirstStep(policyName + "unknown"); assertNull(actualFirstStep); @@ -95,7 +94,7 @@ public void testGetStep() { Client client = mock(Client.class); Mockito.when(client.settings()).thenReturn(Settings.EMPTY); LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases("policy"); - LifecyclePolicyMetadata policyMetadata = new 
LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong()); String phaseName = randomFrom(policy.getPhases().keySet()); Phase phase = policy.getPhases().get(phaseName); PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong()); @@ -119,7 +118,7 @@ public void testGetStepErrorStep() { Step.StepKey errorStepKey = new Step.StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), ErrorStep.NAME); Step expectedStep = new ErrorStep(errorStepKey); Index index = new Index("test", "uuid"); - Map> indexSteps = Collections.singletonMap(index, Collections.singletonList(expectedStep)); + Map> indexSteps = Map.of(index, List.of(expectedStep)); PolicyStepsRegistry registry = new PolicyStepsRegistry(null, null, null, NamedXContentRegistry.EMPTY, null, null); Step actualStep = registry.getStep(emptyMetadata(index), errorStepKey); assertThat(actualStep, equalTo(expectedStep)); @@ -143,7 +142,7 @@ public void testGetStepForIndexWithNoPhaseGetsInitializationStep() { Client client = mock(Client.class); Mockito.when(client.settings()).thenReturn(Settings.EMPTY); LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicy("policy"); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong()); IndexMetadata indexMetadata = IndexMetadata.builder("test") .settings(indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, "policy").build()) .build(); @@ -158,7 +157,7 @@ public void testGetStepUnknownStepKey() { Client client = mock(Client.class); Mockito.when(client.settings()).thenReturn(Settings.EMPTY); LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases("policy"); - LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong()); String phaseName = randomFrom(policy.getPhases().keySet()); Phase phase = policy.getPhases().get(phaseName); PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong()); @@ -193,7 +192,7 @@ public void testUpdateFromNothingToSomethingToNothing() throws Exception { headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); } - Map policyMap = Collections.singletonMap( + Map policyMap = Map.of( newPolicy.getName(), new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()) ); @@ -271,7 +270,7 @@ public void testUpdateFromNothingToSomethingToNothing() throws Exception { assertThat(registry.getStepMap(), equalTo(registryStepMap)); // remove policy - lifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + lifecycleMetadata = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING); currentState = ClusterState.builder(currentState) .metadata(Metadata.builder(metadata).putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)) .build(); @@ -291,7 +290,7 @@ public void testUpdateChangedPolicy() { headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); 
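// Aside (illustrative sketch, not part of the patch): the firstStepMap fixtures above lean on
// Map.get returning null for an unknown policy name (see testGetFirstStepUnknownPolicy). That
// contract is identical for Map.of(...) and Collections.singletonMap(...), which is what makes
// the swap behaviour-preserving:
import java.util.Map;

class RegistryLookupDemo {
    public static void main(String[] args) {
        Map<String, String> firstStepMap = Map.of("my-policy", "init-step");
        System.out.println(firstStepMap.get("my-policy"));             // init-step
        System.out.println(firstStepMap.get("my-policy" + "unknown")); // null, no exception
    }
}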
headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); } - Map policyMap = Collections.singletonMap( + Map policyMap = Map.of( newPolicy.getName(), new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()) ); @@ -316,10 +315,7 @@ public void testUpdateChangedPolicy() { // swap out policy newPolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policyName); lifecycleMetadata = new IndexLifecycleMetadata( - Collections.singletonMap( - policyName, - new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()) - ), + Map.of(policyName, new LifecyclePolicyMetadata(newPolicy, Map.of(), randomNonNegativeLong(), randomNonNegativeLong())), OperationMode.RUNNING ); currentState = ClusterState.builder(currentState) @@ -356,7 +352,7 @@ public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Except headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); } - Map policyMap = Collections.singletonMap( + Map policyMap = Map.of( newPolicy.getName(), new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()) ); @@ -411,7 +407,7 @@ public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Except assertThat(((ShrinkStep) gotStep).getNumberOfShards(), equalTo(1)); // Update the policy with the new policy, but keep the phase the same - policyMap = Collections.singletonMap( + policyMap = Map.of( updatedPolicy.getName(), new LifecyclePolicyMetadata(updatedPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()) ); @@ -457,7 +453,7 @@ public void testGetStepMultithreaded() throws Exception { .build(); SortedMap metas = new TreeMap<>(); - metas.put("policy", new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong())); + metas.put("policy", new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong())); IndexLifecycleMetadata meta = new IndexLifecycleMetadata(metas, OperationMode.RUNNING); PolicyStepsRegistry registry = new PolicyStepsRegistry(REGISTRY, client, null); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/StagnatingIndicesFinderTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/StagnatingIndicesFinderTests.java index be2d449353242..95412f92b6156 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/StagnatingIndicesFinderTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/StagnatingIndicesFinderTests.java @@ -28,7 +28,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongSupplier; -import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -61,7 +60,7 @@ public void testStagnatingIndicesFinder() { assertEquals(expectedMaxTimeOnStep, maxTimeOnStep); assertEquals(expectedMaxRetriesPerStep, maxRetriesPerStep); return rc; - }).collect(Collectors.toList()); + }).toList(); // Per the evaluator, the timeSupplier _must_ be called only twice when(mockedTimeSupplier.getAsLong()).thenReturn(instant, instant); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java index 8c0fede4c11dc..bd0d63ebb0f3d 100644 --- 
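// Aside (illustrative sketch, not part of the patch): the StagnatingIndicesFinderTests change
// above replaces .collect(Collectors.toList()) with the idiomatic Java 16+ Stream.toList(). The
// one semantic difference worth knowing: Stream.toList() returns an unmodifiable list, while
// Collectors.toList() makes no such guarantee (in practice it currently yields a mutable ArrayList):
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class ToListDemo {
    public static void main(String[] args) {
        List<Integer> mutable = Stream.of(1, 2, 3).collect(Collectors.toList());
        mutable.add(4); // fine
        List<Integer> unmodifiable = Stream.of(1, 2, 3).toList();
        // unmodifiable.add(4); // would throw UnsupportedOperationException
        System.out.println(mutable + " " + unmodifiable);
    }
}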
a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java @@ -24,7 +24,8 @@ import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.mockito.ArgumentMatcher; -import static java.util.Collections.emptyMap; +import java.util.Map; + import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; @@ -50,7 +51,7 @@ public void testStopILMClusterStatePriorityIsImmediate() { ILMActions.STOP.name(), "description", new TaskId(randomLong() + ":" + randomLong()), - emptyMap() + Map.of() ); StopILMRequest request = new StopILMRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); transportStopILMAction.masterOperation(task, request, ClusterState.EMPTY_STATE, ActionListener.noop()); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java index 86c0128a3e53c..1716057cdfe46 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java @@ -21,6 +21,9 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.external.response.streaming.ServerSentEvent; import org.junit.ClassRule; @@ -341,10 +344,21 @@ protected Deque streamInferOnMockService(String modelId, TaskTy return callAsync(endpoint, input); } + protected Deque unifiedCompletionInferOnMockService(String modelId, TaskType taskType, List input) + throws Exception { + var endpoint = Strings.format("_inference/%s/%s/_unified", taskType, modelId); + return callAsyncUnified(endpoint, input, "user"); + } + private Deque callAsync(String endpoint, List input) throws Exception { - var responseConsumer = new AsyncInferenceResponseConsumer(); var request = new Request("POST", endpoint); request.setJsonEntity(jsonBody(input, null)); + + return execAsyncCall(request); + } + + private Deque execAsyncCall(Request request) throws Exception { + var responseConsumer = new AsyncInferenceResponseConsumer(); request.setOptions(RequestOptions.DEFAULT.toBuilder().setHttpAsyncResponseConsumerFactory(() -> responseConsumer).build()); var latch = new CountDownLatch(1); client().performRequestAsync(request, new ResponseListener() { @@ -362,6 +376,22 @@ public void onFailure(Exception exception) { return responseConsumer.events(); } + private Deque callAsyncUnified(String endpoint, List input, String role) throws Exception { + var request = new Request("POST", endpoint); + + request.setJsonEntity(createUnifiedJsonBody(input, role)); + return execAsyncCall(request); + } + + private String createUnifiedJsonBody(List input, String role) throws IOException { + var messages = input.stream().map(i -> Map.of("content", i, "role", role)).toList(); + XContentBuilder builder = 
XContentFactory.contentBuilder(XContentType.JSON); + builder.startObject(); + builder.field("messages", messages); + builder.endObject(); + return org.elasticsearch.common.Strings.toString(builder); + } + protected Map infer(String modelId, TaskType taskType, List input) throws IOException { var endpoint = Strings.format("_inference/%s/%s", taskType, modelId); return inferInternal(endpoint, input, null, Map.of()); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index 61ffa5c75ed9f..0816da48514b0 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -11,6 +11,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.inference.TaskType; @@ -327,6 +328,56 @@ public void testSupportedStream() throws Exception { } } + public void testUnifiedCompletionInference() throws Exception { + String modelId = "streaming"; + putModel(modelId, mockCompletionServiceModelConfig(TaskType.COMPLETION)); + var singleModel = getModel(modelId); + assertEquals(modelId, singleModel.get("inference_id")); + assertEquals(TaskType.COMPLETION.toString(), singleModel.get("task_type")); + + var input = IntStream.range(1, 2 + randomInt(8)).mapToObj(i -> randomUUID()).toList(); + try { + var events = unifiedCompletionInferOnMockService(modelId, TaskType.COMPLETION, input); + var expectedResponses = expectedResultsIterator(input); + assertThat(events.size(), equalTo((input.size() + 1) * 2)); + events.forEach(event -> { + switch (event.name()) { + case EVENT -> assertThat(event.value(), equalToIgnoringCase("message")); + case DATA -> assertThat(event.value(), equalTo(expectedResponses.next())); + } + }); + } finally { + deleteModel(modelId); + } + } + + private static Iterator expectedResultsIterator(List input) { + return Stream.concat(input.stream().map(String::toUpperCase).map(InferenceCrudIT::expectedResult), Stream.of("[DONE]")).iterator(); + } + + private static String expectedResult(String input) { + try { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.startObject(); + builder.field("id", "id"); + builder.startArray("choices"); + builder.startObject(); + builder.startObject("delta"); + builder.field("content", input); + builder.endObject(); + builder.field("index", 0); + builder.endObject(); + builder.endArray(); + builder.field("model", "gpt-4o-2024-08-06"); + builder.field("object", "chat.completion.chunk"); + builder.endObject(); + + return Strings.toString(builder); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + public void testGetZeroModels() throws IOException { var models = getModels("_all", TaskType.COMPLETION); assertThat(models, empty()); diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java index 
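// Aside (illustrative sketch, not part of the patch): createUnifiedJsonBody above builds the
// request payload for the new _unified endpoint. Assuming the Elasticsearch x-content classes on
// the classpath (the same ones the test imports), the emitted JSON for input ["hi"] with role
// "user" is: {"messages":[{"content":"hi","role":"user"}]}. A minimal sketch of that construction,
// mirroring the helper in the diff:
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.elasticsearch.common.Strings;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentType;

class UnifiedBodySketch {
    static String body(List<String> input, String role) throws IOException {
        var messages = input.stream().map(i -> Map.of("content", i, "role", role)).toList();
        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
        builder.startObject();
        builder.field("messages", messages); // field(String, Object) serializes the list of maps as a JSON array
        builder.endObject();
        return Strings.toString(builder);
    }
}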
ae11a02d312e2..f5f682b143a72 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java @@ -31,6 +31,7 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; @@ -132,6 +133,16 @@ public void infer( } } + @Override + public void unifiedCompletionInfer( + Model model, + UnifiedCompletionRequest request, + TimeValue timeout, + ActionListener listener + ) { + listener.onFailure(new UnsupportedOperationException("unifiedCompletionInfer not supported")); + } + @Override public void chunkedInfer( Model model, diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java index 9320571572f0a..fa1e27005c287 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java @@ -29,6 +29,7 @@ import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; @@ -120,6 +121,16 @@ public void infer( } } + @Override + public void unifiedCompletionInfer( + Model model, + UnifiedCompletionRequest request, + TimeValue timeout, + ActionListener listener + ) { + listener.onFailure(new UnsupportedOperationException("unifiedCompletionInfer not supported")); + } + @Override public void chunkedInfer( Model model, diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java index fe0223cce0323..64569fd8c5c6a 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java @@ -29,6 +29,7 @@ import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; import 
org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; @@ -123,6 +124,16 @@ public void infer( } } + @Override + public void unifiedCompletionInfer( + Model model, + UnifiedCompletionRequest request, + TimeValue timeout, + ActionListener listener + ) { + throw new UnsupportedOperationException("unifiedCompletionInfer not supported"); + } + @Override public void chunkedInfer( Model model, diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java index 6d7983bc8cb53..f7a05a27354ef 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java @@ -30,12 +30,14 @@ import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.inference.results.StreamingChatCompletionResults; +import org.elasticsearch.xpack.core.inference.results.StreamingUnifiedChatCompletionResults; import java.io.IOException; import java.util.EnumSet; @@ -121,6 +123,24 @@ public void infer( } } + @Override + public void unifiedCompletionInfer( + Model model, + UnifiedCompletionRequest request, + TimeValue timeout, + ActionListener listener + ) { + switch (model.getConfigurations().getTaskType()) { + case COMPLETION -> listener.onResponse(makeUnifiedResults(request)); + default -> listener.onFailure( + new ElasticsearchStatusException( + TaskType.unsupportedTaskTypeErrorMsg(model.getConfigurations().getTaskType(), name()), + RestStatus.BAD_REQUEST + ) + ); + } + } + private StreamingChatCompletionResults makeResults(List input) { var responseIter = input.stream().map(String::toUpperCase).iterator(); return new StreamingChatCompletionResults(subscriber -> { @@ -152,6 +172,59 @@ private ChunkedToXContent completionChunk(String delta) { ); } + private StreamingUnifiedChatCompletionResults makeUnifiedResults(UnifiedCompletionRequest request) { + var responseIter = request.messages().stream().map(message -> message.content().toString().toUpperCase()).iterator(); + return new StreamingUnifiedChatCompletionResults(subscriber -> { + subscriber.onSubscribe(new Flow.Subscription() { + @Override + public void request(long n) { + if (responseIter.hasNext()) { + subscriber.onNext(unifiedCompletionChunk(responseIter.next())); + } else { + subscriber.onComplete(); + } + } + + @Override + public void cancel() {} + }); + }); + } + + /* + The response format looks like this + { + "id": "chatcmpl-AarrzyuRflye7yzDF4lmVnenGmQCF", + "choices": [ + { + "delta": { + "content": " information" + }, + "index": 0 + } + ], + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk" + } + */ + private 
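// Aside (illustrative sketch, not part of the patch): note the two failure styles among the mock
// services above -- the dense and reranking extensions report "unsupported" through
// listener.onFailure(...), while the sparse extension throws directly. In listener-based async
// APIs the onFailure route is generally the safer default, because a thrown exception only
// reaches callers that happen to invoke the method synchronously. A generic sketch of the two
// styles (Listener here is a stand-in for ActionListener, not the real interface):
class AsyncFailureStyles {
    interface Listener<T> { void onResponse(T value); void onFailure(Exception e); }

    // Style 1: failure is delivered on the callback path, like TestDenseInferenceServiceExtension.
    static void failViaListener(Listener<String> listener) {
        listener.onFailure(new UnsupportedOperationException("unifiedCompletionInfer not supported"));
    }

    // Style 2: failure escapes synchronously, like TestSparseInferenceServiceExtension.
    static void failByThrowing(Listener<String> listener) {
        throw new UnsupportedOperationException("unifiedCompletionInfer not supported");
    }

    public static void main(String[] args) {
        failViaListener(new Listener<>() {
            @Override public void onResponse(String value) {}
            @Override public void onFailure(Exception e) { System.out.println("delivered via callback: " + e); }
        });
    }
}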
ChunkedToXContent unifiedCompletionChunk(String delta) { + return params -> Iterators.concat( + ChunkedToXContentHelper.startObject(), + ChunkedToXContentHelper.field("id", "id"), + ChunkedToXContentHelper.startArray("choices"), + ChunkedToXContentHelper.startObject(), + ChunkedToXContentHelper.startObject("delta"), + ChunkedToXContentHelper.field("content", delta), + ChunkedToXContentHelper.endObject(), + ChunkedToXContentHelper.field("index", 0), + ChunkedToXContentHelper.endObject(), + ChunkedToXContentHelper.endArray(), + ChunkedToXContentHelper.field("model", "gpt-4o-2024-08-06"), + ChunkedToXContentHelper.field("object", "chat.completion.chunk"), + ChunkedToXContentHelper.endObject() + ); + } + @Override public void chunkedInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 513945a0a8635..a7a6004c0ebb2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -10,6 +10,7 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; +import org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor; import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; @@ -41,7 +42,8 @@ public Set getTestFeatures() { SemanticTextFieldMapper.SEMANTIC_TEXT_DELETE_FIX, SemanticTextFieldMapper.SEMANTIC_TEXT_ZERO_SIZE_FIX, SemanticTextFieldMapper.SEMANTIC_TEXT_ALWAYS_EMIT_INFERENCE_ID_FIX, - SEMANTIC_TEXT_HIGHLIGHTER + SEMANTIC_TEXT_HIGHLIGHTER, + SemanticMatchQueryRewriteInterceptor.SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java index 673b841317a3d..a4187f4c4fa90 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java @@ -16,6 +16,7 @@ import org.elasticsearch.inference.SecretSettings; import org.elasticsearch.inference.ServiceSettings; import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.inference.UnifiedCompletionRequest; import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; import org.elasticsearch.xpack.core.inference.results.InferenceChunkedSparseEmbeddingResults; @@ -137,11 +138,18 @@ public static List getNamedWriteables() { addEisNamedWriteables(namedWriteables); addAlibabaCloudSearchNamedWriteables(namedWriteables); + addUnifiedNamedWriteables(namedWriteables); + namedWriteables.addAll(StreamingTaskManager.namedWriteables()); return namedWriteables; } + private static void addUnifiedNamedWriteables(List namedWriteables) { + var writeables = UnifiedCompletionRequest.getNamedWriteables(); + namedWriteables.addAll(writeables); + } + private static void 
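// Aside (illustrative sketch, not part of the patch): makeUnifiedResults above hand-rolls a
// java.util.concurrent.Flow publisher that emits one completion chunk per Subscription.request(n)
// call and completes once its iterator is exhausted (like the mock, this simplified version
// ignores n rather than honouring full demand). A self-contained JDK-only demo of that pattern:
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Flow;

class OneChunkPerRequestPublisher implements Flow.Publisher<String> {
    private final Iterator<String> chunks = List.of("HELLO", "WORLD").iterator();

    @Override
    public void subscribe(Flow.Subscriber<? super String> subscriber) {
        subscriber.onSubscribe(new Flow.Subscription() {
            @Override
            public void request(long n) {
                if (chunks.hasNext()) {
                    subscriber.onNext(chunks.next()); // one chunk per request, as in the mock service
                } else {
                    subscriber.onComplete();
                }
            }

            @Override
            public void cancel() {}
        });
    }

    public static void main(String[] args) {
        new OneChunkPerRequestPublisher().subscribe(new Flow.Subscriber<>() {
            private Flow.Subscription subscription;
            @Override public void onSubscribe(Flow.Subscription s) { subscription = s; s.request(1); }
            @Override public void onNext(String item) { System.out.println(item); subscription.request(1); }
            @Override public void onError(Throwable t) { t.printStackTrace(); }
            @Override public void onComplete() { System.out.println("[DONE]"); }
        });
    }
}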
addAmazonBedrockNamedWriteables(List namedWriteables) { namedWriteables.add( new NamedWriteableRegistry.Entry( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 8d5acbad26658..d2dc49a39888f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -33,6 +33,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.search.fetch.subphase.highlight.Highlighter; @@ -49,6 +50,7 @@ import org.elasticsearch.xpack.core.inference.action.GetInferenceServicesAction; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.action.PutInferenceModelAction; +import org.elasticsearch.xpack.core.inference.action.UnifiedCompletionAction; import org.elasticsearch.xpack.core.inference.action.UpdateInferenceModelAction; import org.elasticsearch.xpack.inference.action.TransportDeleteInferenceEndpointAction; import org.elasticsearch.xpack.inference.action.TransportGetInferenceDiagnosticsAction; @@ -57,6 +59,7 @@ import org.elasticsearch.xpack.inference.action.TransportInferenceAction; import org.elasticsearch.xpack.inference.action.TransportInferenceUsageAction; import org.elasticsearch.xpack.inference.action.TransportPutInferenceModelAction; +import org.elasticsearch.xpack.inference.action.TransportUnifiedCompletionInferenceAction; import org.elasticsearch.xpack.inference.action.TransportUpdateInferenceModelAction; import org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilter; import org.elasticsearch.xpack.inference.common.Truncator; @@ -70,6 +73,7 @@ import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.mapper.OffsetSourceFieldMapper; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; +import org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; @@ -83,6 +87,7 @@ import org.elasticsearch.xpack.inference.rest.RestInferenceAction; import org.elasticsearch.xpack.inference.rest.RestPutInferenceModelAction; import org.elasticsearch.xpack.inference.rest.RestStreamInferenceAction; +import org.elasticsearch.xpack.inference.rest.RestUnifiedCompletionInferenceAction; import org.elasticsearch.xpack.inference.rest.RestUpdateInferenceModelAction; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchService; @@ -103,7 +108,6 @@ import org.elasticsearch.xpack.inference.services.ibmwatsonx.IbmWatsonxService; import org.elasticsearch.xpack.inference.services.mistral.MistralService; import org.elasticsearch.xpack.inference.services.openai.OpenAiService; -import org.elasticsearch.xpack.inference.telemetry.ApmInferenceStats; import 
org.elasticsearch.xpack.inference.telemetry.InferenceStats; import java.util.ArrayList; @@ -153,8 +157,9 @@ public InferencePlugin(Settings settings) { @Override public List> getActions() { - return List.of( + var availableActions = List.of( new ActionHandler<>(InferenceAction.INSTANCE, TransportInferenceAction.class), + new ActionHandler<>(GetInferenceModelAction.INSTANCE, TransportGetInferenceModelAction.class), new ActionHandler<>(PutInferenceModelAction.INSTANCE, TransportPutInferenceModelAction.class), new ActionHandler<>(UpdateInferenceModelAction.INSTANCE, TransportUpdateInferenceModelAction.class), @@ -163,6 +168,13 @@ public InferencePlugin(Settings settings) { new ActionHandler<>(GetInferenceDiagnosticsAction.INSTANCE, TransportGetInferenceDiagnosticsAction.class), new ActionHandler<>(GetInferenceServicesAction.INSTANCE, TransportGetInferenceServicesAction.class) ); + + List> conditionalActions = + UnifiedCompletionFeature.UNIFIED_COMPLETION_FEATURE_FLAG.isEnabled() + ? List.of(new ActionHandler<>(UnifiedCompletionAction.INSTANCE, TransportUnifiedCompletionInferenceAction.class)) + : List.of(); + + return Stream.concat(availableActions.stream(), conditionalActions.stream()).toList(); } @Override @@ -177,7 +189,7 @@ public List getRestHandlers( Supplier nodesInCluster, Predicate clusterSupportsFeature ) { - return List.of( + var availableRestActions = List.of( new RestInferenceAction(), new RestStreamInferenceAction(), new RestGetInferenceModelAction(), @@ -186,6 +198,11 @@ public List getRestHandlers( new RestDeleteInferenceEndpointAction(), new RestGetInferenceDiagnosticsAction() ); + List conditionalRestActions = UnifiedCompletionFeature.UNIFIED_COMPLETION_FEATURE_FLAG.isEnabled() + ? List.of(new RestUnifiedCompletionInferenceAction()) + : List.of(); + + return Stream.concat(availableRestActions.stream(), conditionalRestActions.stream()).toList(); } @Override @@ -238,7 +255,7 @@ public Collection createComponents(PluginServices services) { shardBulkInferenceActionFilter.set(actionFilter); var meterRegistry = services.telemetryProvider().getMeterRegistry(); - var stats = new PluginComponentBinding<>(InferenceStats.class, ApmInferenceStats.create(meterRegistry)); + var stats = new PluginComponentBinding<>(InferenceStats.class, InferenceStats.create(meterRegistry)); return List.of(modelRegistry, registry, httpClientManager, stats); } @@ -385,6 +402,11 @@ public List> getQueries() { return List.of(new QuerySpec<>(SemanticQueryBuilder.NAME, SemanticQueryBuilder::new, SemanticQueryBuilder::fromXContent)); } + @Override + public List getQueryRewriteInterceptors() { + return List.of(new SemanticMatchQueryRewriteInterceptor()); + } + @Override public List> getRetrievers() { return List.of( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/UnifiedCompletionFeature.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/UnifiedCompletionFeature.java new file mode 100644 index 0000000000000..3e13d0c1e39de --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/UnifiedCompletionFeature.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
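A note on the conditional registration pattern above: both getActions() and getRestHandlers() now assemble a base list and concatenate a feature-flag-gated list via Stream.concat. Below is a minimal, self-contained sketch of that pattern, assuming FeatureFlag reads the es.<name>_feature_flag_enabled system property (the flag and JVM option come from the UnifiedCompletionFeature file below; the route strings are illustrative only, not from this patch):

import java.util.List;
import java.util.stream.Stream;

class GatedHandlerRegistration {
    // Stand-in for UnifiedCompletionFeature.UNIFIED_COMPLETION_FEATURE_FLAG.isEnabled(),
    // toggled with -Des.inference_unified_feature_flag_enabled=true.
    static final boolean UNIFIED_ENABLED = Boolean.getBoolean("es.inference_unified_feature_flag_enabled");

    static List<String> restHandlers() {
        var always = List.of("_inference", "_inference/_stream");
        var gated = UNIFIED_ENABLED ? List.of("_inference/_unified") : List.<String>of();
        // Same shape as the plugin code: fixed handlers plus the flag-gated ones.
        return Stream.concat(always.stream(), gated.stream()).toList();
    }
}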
+ */ + +package org.elasticsearch.xpack.inference; + +import org.elasticsearch.common.util.FeatureFlag; + +/** + * Unified Completion feature flag. When the feature is complete, this flag will be removed. + * Enable feature via JVM option: `-Des.inference_unified_feature_flag_enabled=true`. + */ +public class UnifiedCompletionFeature { + public static final FeatureFlag UNIFIED_COMPLETION_FEATURE_FLAG = new FeatureFlag("inference_unified"); + + private UnifiedCompletionFeature() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/BaseTransportInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/BaseTransportInferenceAction.java new file mode 100644 index 0000000000000..2a0e8e1775279 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/BaseTransportInferenceAction.java @@ -0,0 +1,250 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.InferenceServiceRegistry; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnparsedModel; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.inference.action.BaseInferenceActionRequest; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.inference.action.task.StreamingTaskManager; +import org.elasticsearch.xpack.inference.common.DelegatingProcessor; +import org.elasticsearch.xpack.inference.registry.ModelRegistry; +import org.elasticsearch.xpack.inference.telemetry.InferenceStats; +import org.elasticsearch.xpack.inference.telemetry.InferenceTimer; + +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.telemetry.InferenceStats.modelAttributes; +import static org.elasticsearch.xpack.inference.telemetry.InferenceStats.responseAttributes; + +public abstract class BaseTransportInferenceAction extends HandledTransportAction< + Request, + InferenceAction.Response> { + + private static final Logger log = LogManager.getLogger(BaseTransportInferenceAction.class); + private static final String STREAMING_INFERENCE_TASK_TYPE = "streaming_inference"; + private static final String STREAMING_TASK_ACTION = "xpack/inference/streaming_inference[n]"; + private final ModelRegistry modelRegistry; + private final InferenceServiceRegistry 
serviceRegistry; + private final InferenceStats inferenceStats; + private final StreamingTaskManager streamingTaskManager; + + public BaseTransportInferenceAction( + String inferenceActionName, + TransportService transportService, + ActionFilters actionFilters, + ModelRegistry modelRegistry, + InferenceServiceRegistry serviceRegistry, + InferenceStats inferenceStats, + StreamingTaskManager streamingTaskManager, + Writeable.Reader requestReader + ) { + super(inferenceActionName, transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this.modelRegistry = modelRegistry; + this.serviceRegistry = serviceRegistry; + this.inferenceStats = inferenceStats; + this.streamingTaskManager = streamingTaskManager; + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + var timer = InferenceTimer.start(); + + var getModelListener = ActionListener.wrap((UnparsedModel unparsedModel) -> { + var service = serviceRegistry.getService(unparsedModel.service()); + try { + validationHelper(service::isEmpty, () -> unknownServiceException(unparsedModel.service(), request.getInferenceEntityId())); + validationHelper( + () -> request.getTaskType().isAnyOrSame(unparsedModel.taskType()) == false, + () -> requestModelTaskTypeMismatchException(request.getTaskType(), unparsedModel.taskType()) + ); + validationHelper( + () -> isInvalidTaskTypeForInferenceEndpoint(request, unparsedModel), + () -> createInvalidTaskTypeException(request, unparsedModel) + ); + } catch (Exception e) { + recordMetrics(unparsedModel, timer, e); + listener.onFailure(e); + return; + } + + var model = service.get() + .parsePersistedConfigWithSecrets( + unparsedModel.inferenceEntityId(), + unparsedModel.taskType(), + unparsedModel.settings(), + unparsedModel.secrets() + ); + inferOnServiceWithMetrics(model, request, service.get(), timer, listener); + }, e -> { + try { + inferenceStats.inferenceDuration().record(timer.elapsedMillis(), responseAttributes(e)); + } catch (Exception metricsException) { + log.atDebug().withThrowable(metricsException).log("Failed to record metrics when the model is missing, dropping metrics"); + } + listener.onFailure(e); + }); + + modelRegistry.getModelWithSecrets(request.getInferenceEntityId(), getModelListener); + } + + private static void validationHelper(Supplier validationFailure, Supplier exceptionCreator) { + if (validationFailure.get()) { + throw exceptionCreator.get(); + } + } + + protected abstract boolean isInvalidTaskTypeForInferenceEndpoint(Request request, UnparsedModel unparsedModel); + + protected abstract ElasticsearchStatusException createInvalidTaskTypeException(Request request, UnparsedModel unparsedModel); + + private void recordMetrics(UnparsedModel model, InferenceTimer timer, @Nullable Throwable t) { + try { + inferenceStats.inferenceDuration().record(timer.elapsedMillis(), responseAttributes(model, t)); + } catch (Exception e) { + log.atDebug().withThrowable(e).log("Failed to record metrics with an unparsed model, dropping metrics"); + } + } + + private void inferOnServiceWithMetrics( + Model model, + Request request, + InferenceService service, + InferenceTimer timer, + ActionListener listener + ) { + inferenceStats.requestCount().incrementBy(1, modelAttributes(model)); + inferOnService(model, request, service, ActionListener.wrap(inferenceResults -> { + if (request.isStreaming()) { + var taskProcessor = streamingTaskManager.create(STREAMING_INFERENCE_TASK_TYPE, STREAMING_TASK_ACTION); + 
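+ // Streaming chain: the service's result publisher feeds the task-manager
+ // processor (which tracks the stream as a cancellable task), and that in turn
+ // feeds PublisherWithMetrics below, so the inference duration is recorded on
+ // error, cancellation, or completion.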
inferenceResults.publisher().subscribe(taskProcessor); + + var instrumentedStream = new PublisherWithMetrics(timer, model); + taskProcessor.subscribe(instrumentedStream); + + listener.onResponse(new InferenceAction.Response(inferenceResults, instrumentedStream)); + } else { + recordMetrics(model, timer, null); + listener.onResponse(new InferenceAction.Response(inferenceResults)); + } + }, e -> { + recordMetrics(model, timer, e); + listener.onFailure(e); + })); + } + + private void recordMetrics(Model model, InferenceTimer timer, @Nullable Throwable t) { + try { + inferenceStats.inferenceDuration().record(timer.elapsedMillis(), responseAttributes(model, t)); + } catch (Exception e) { + log.atDebug().withThrowable(e).log("Failed to record metrics with a parsed model, dropping metrics"); + } + } + + private void inferOnService(Model model, Request request, InferenceService service, ActionListener listener) { + if (request.isStreaming() == false || service.canStream(request.getTaskType())) { + doInference(model, request, service, listener); + } else { + listener.onFailure(unsupportedStreamingTaskException(request, service)); + } + } + + protected abstract void doInference( + Model model, + Request request, + InferenceService service, + ActionListener listener + ); + + private ElasticsearchStatusException unsupportedStreamingTaskException(Request request, InferenceService service) { + var supportedTasks = service.supportedStreamingTasks(); + if (supportedTasks.isEmpty()) { + return new ElasticsearchStatusException( + format("Streaming is not allowed for service [%s].", service.name()), + RestStatus.METHOD_NOT_ALLOWED + ); + } else { + var validTasks = supportedTasks.stream().map(TaskType::toString).collect(Collectors.joining(",")); + return new ElasticsearchStatusException( + format( + "Streaming is not allowed for service [%s] and task [%s]. Supported tasks: [%s]", + service.name(), + request.getTaskType(), + validTasks + ), + RestStatus.METHOD_NOT_ALLOWED + ); + } + } + + private static ElasticsearchStatusException unknownServiceException(String service, String inferenceId) { + return new ElasticsearchStatusException("Unknown service [{}] for model [{}]. 
", RestStatus.BAD_REQUEST, service, inferenceId); + } + + private static ElasticsearchStatusException requestModelTaskTypeMismatchException(TaskType requested, TaskType expected) { + return new ElasticsearchStatusException( + "Incompatible task_type, the requested type [{}] does not match the model type [{}]", + RestStatus.BAD_REQUEST, + requested, + expected + ); + } + + private class PublisherWithMetrics extends DelegatingProcessor { + + private final InferenceTimer timer; + private final Model model; + + private PublisherWithMetrics(InferenceTimer timer, Model model) { + this.timer = timer; + this.model = model; + } + + @Override + protected void next(ChunkedToXContent item) { + downstream().onNext(item); + } + + @Override + public void onError(Throwable throwable) { + recordMetrics(model, timer, throwable); + super.onError(throwable); + } + + @Override + protected void onCancel() { + recordMetrics(model, timer, null); + super.onCancel(); + } + + @Override + public void onComplete() { + recordMetrics(model, timer, null); + super.onComplete(); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java index 4045734546596..0c12b5bb0fba6 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java @@ -10,41 +10,18 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InferenceServiceRegistry; -import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.Model; -import org.elasticsearch.inference.TaskType; import org.elasticsearch.inference.UnparsedModel; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.inference.action.task.StreamingTaskManager; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.elasticsearch.xpack.inference.telemetry.InferenceStats; -import java.util.Set; -import java.util.stream.Collectors; - -import static org.elasticsearch.core.Strings.format; - -public class TransportInferenceAction extends HandledTransportAction { - private static final String STREAMING_INFERENCE_TASK_TYPE = "streaming_inference"; - private static final String STREAMING_TASK_ACTION = "xpack/inference/streaming_inference[n]"; - - private static final Set> supportsStreaming = Set.of(); - - private final ModelRegistry modelRegistry; - private final InferenceServiceRegistry serviceRegistry; - private final InferenceStats inferenceStats; - private final StreamingTaskManager streamingTaskManager; - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(TransportInferenceAction.class); +public 
class TransportInferenceAction extends BaseTransportInferenceAction { @Inject public TransportInferenceAction( @@ -55,111 +32,44 @@ public TransportInferenceAction( InferenceStats inferenceStats, StreamingTaskManager streamingTaskManager ) { - super(InferenceAction.NAME, transportService, actionFilters, InferenceAction.Request::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); - this.modelRegistry = modelRegistry; - this.serviceRegistry = serviceRegistry; - this.inferenceStats = inferenceStats; - this.streamingTaskManager = streamingTaskManager; + super( + InferenceAction.NAME, + transportService, + actionFilters, + modelRegistry, + serviceRegistry, + inferenceStats, + streamingTaskManager, + InferenceAction.Request::new + ); } @Override - protected void doExecute(Task task, InferenceAction.Request request, ActionListener listener) { - - ActionListener getModelListener = listener.delegateFailureAndWrap((delegate, unparsedModel) -> { - var service = serviceRegistry.getService(unparsedModel.service()); - if (service.isEmpty()) { - listener.onFailure(unknownServiceException(unparsedModel.service(), request.getInferenceEntityId())); - return; - } - - if (request.getTaskType().isAnyOrSame(unparsedModel.taskType()) == false) { - // not the wildcard task type and not the model task type - listener.onFailure(incompatibleTaskTypeException(request.getTaskType(), unparsedModel.taskType())); - return; - } - - var model = service.get() - .parsePersistedConfigWithSecrets( - unparsedModel.inferenceEntityId(), - unparsedModel.taskType(), - unparsedModel.settings(), - unparsedModel.secrets() - ); - inferOnService(model, request, service.get(), delegate); - }); + protected boolean isInvalidTaskTypeForInferenceEndpoint(InferenceAction.Request request, UnparsedModel unparsedModel) { + return false; + } - modelRegistry.getModelWithSecrets(request.getInferenceEntityId(), getModelListener); + @Override + protected ElasticsearchStatusException createInvalidTaskTypeException(InferenceAction.Request request, UnparsedModel unparsedModel) { + return null; } - private void inferOnService( + @Override + protected void doInference( Model model, InferenceAction.Request request, InferenceService service, ActionListener listener ) { - if (request.isStreaming() == false || service.canStream(request.getTaskType())) { - inferenceStats.incrementRequestCount(model); - service.infer( - model, - request.getQuery(), - request.getInput(), - request.isStreaming(), - request.getTaskSettings(), - request.getInputType(), - request.getInferenceTimeout(), - createListener(request, listener) - ); - } else { - listener.onFailure(unsupportedStreamingTaskException(request, service)); - } - } - - private ElasticsearchStatusException unsupportedStreamingTaskException(InferenceAction.Request request, InferenceService service) { - var supportedTasks = service.supportedStreamingTasks(); - if (supportedTasks.isEmpty()) { - return new ElasticsearchStatusException( - format("Streaming is not allowed for service [%s].", service.name()), - RestStatus.METHOD_NOT_ALLOWED - ); - } else { - var validTasks = supportedTasks.stream().map(TaskType::toString).collect(Collectors.joining(",")); - return new ElasticsearchStatusException( - format( - "Streaming is not allowed for service [%s] and task [%s]. 
Supported tasks: [%s]", - service.name(), - request.getTaskType(), - validTasks - ), - RestStatus.METHOD_NOT_ALLOWED - ); - } - } - - private ActionListener createListener( - InferenceAction.Request request, - ActionListener listener - ) { - if (request.isStreaming()) { - return listener.delegateFailureAndWrap((l, inferenceResults) -> { - var taskProcessor = streamingTaskManager.create(STREAMING_INFERENCE_TASK_TYPE, STREAMING_TASK_ACTION); - inferenceResults.publisher().subscribe(taskProcessor); - l.onResponse(new InferenceAction.Response(inferenceResults, taskProcessor)); - }); - } - return listener.delegateFailureAndWrap((l, inferenceResults) -> l.onResponse(new InferenceAction.Response(inferenceResults))); - } - - private static ElasticsearchStatusException unknownServiceException(String service, String inferenceId) { - return new ElasticsearchStatusException("Unknown service [{}] for model [{}]. ", RestStatus.BAD_REQUEST, service, inferenceId); - } - - private static ElasticsearchStatusException incompatibleTaskTypeException(TaskType requested, TaskType expected) { - return new ElasticsearchStatusException( - "Incompatible task_type, the requested type [{}] does not match the model type [{}]", - RestStatus.BAD_REQUEST, - requested, - expected + service.infer( + model, + request.getQuery(), + request.getInput(), + request.isStreaming(), + request.getTaskSettings(), + request.getInputType(), + request.getInferenceTimeout(), + listener ); } - } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportUnifiedCompletionInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportUnifiedCompletionInferenceAction.java new file mode 100644 index 0000000000000..f0906231d8f42 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportUnifiedCompletionInferenceAction.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
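For orientation before the new transport action below: its task-type gate only admits COMPLETION endpoints. A sketch of the intended truth table, assuming TaskType.isAnyOrSame returns true when either operand is TaskType.ANY or the two are equal (consistent with its use in the base action); the helper is hypothetical and merely mirrors the isInvalidTaskTypeForInferenceEndpoint override that follows:

// Hypothetical mirror of the override below; not code from this patch.
static boolean invalidForUnifiedApi(TaskType requested, TaskType endpoint) {
    return requested.isAnyOrSame(TaskType.COMPLETION) == false || endpoint != TaskType.COMPLETION;
}
// invalidForUnifiedApi(TaskType.ANY, TaskType.COMPLETION)            -> false (accepted)
// invalidForUnifiedApi(TaskType.COMPLETION, TaskType.COMPLETION)     -> false (accepted)
// invalidForUnifiedApi(TaskType.TEXT_EMBEDDING, TaskType.COMPLETION) -> true  (rejected with 400)
// invalidForUnifiedApi(TaskType.COMPLETION, TaskType.TEXT_EMBEDDING) -> true  (rejected with 400)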
+ */ + +package org.elasticsearch.xpack.inference.action; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.InferenceServiceRegistry; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnparsedModel; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.inference.action.UnifiedCompletionAction; +import org.elasticsearch.xpack.inference.action.task.StreamingTaskManager; +import org.elasticsearch.xpack.inference.registry.ModelRegistry; +import org.elasticsearch.xpack.inference.telemetry.InferenceStats; + +public class TransportUnifiedCompletionInferenceAction extends BaseTransportInferenceAction { + + @Inject + public TransportUnifiedCompletionInferenceAction( + TransportService transportService, + ActionFilters actionFilters, + ModelRegistry modelRegistry, + InferenceServiceRegistry serviceRegistry, + InferenceStats inferenceStats, + StreamingTaskManager streamingTaskManager + ) { + super( + UnifiedCompletionAction.NAME, + transportService, + actionFilters, + modelRegistry, + serviceRegistry, + inferenceStats, + streamingTaskManager, + UnifiedCompletionAction.Request::new + ); + } + + @Override + protected boolean isInvalidTaskTypeForInferenceEndpoint(UnifiedCompletionAction.Request request, UnparsedModel unparsedModel) { + return request.getTaskType().isAnyOrSame(TaskType.COMPLETION) == false || unparsedModel.taskType() != TaskType.COMPLETION; + } + + @Override + protected ElasticsearchStatusException createInvalidTaskTypeException( + UnifiedCompletionAction.Request request, + UnparsedModel unparsedModel + ) { + return new ElasticsearchStatusException( + "Incompatible task_type for unified API, the requested type [{}] must be one of [{}]", + RestStatus.BAD_REQUEST, + request.getTaskType(), + TaskType.COMPLETION.toString() + ); + } + + @Override + protected void doInference( + Model model, + UnifiedCompletionAction.Request request, + InferenceService service, + ActionListener listener + ) { + service.unifiedCompletionInfer(model, request.getUnifiedCompletionRequest(), null, listener); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunker.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunker.java index b2d6c83b89211..bf28e30074a9d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunker.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunker.java @@ -62,7 +62,8 @@ public List chunk(String input, ChunkingSettings chunkingSettings) * * @param input Text to chunk * @param maxNumberWordsPerChunk Maximum size of the chunk - * @return The input text chunked + * @param includePrecedingSentence Include the previous sentence + * @return The input text offsets */ public List chunk(String input, int maxNumberWordsPerChunk, boolean includePrecedingSentence) { var chunks = new ArrayList(); @@ -158,6 +159,11 @@ public List chunk(String input, int maxNumberWordsPerChunk, boolean chunks.add(new 
ChunkOffset(chunkStart, input.length())); } + if (chunks.isEmpty()) { + // The input did not chunk, return the entire input + chunks.add(new ChunkOffset(0, input.length())); + } + return chunks; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunker.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunker.java index b15e2134f4cf7..1ce90a9e416e5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunker.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunker.java @@ -96,10 +96,6 @@ List chunkPositions(String input, int chunkSize, int overlap) { throw new IllegalArgumentException("Invalid chunking parameters, overlap [" + overlap + "] must be >= 0"); } - if (input.isEmpty()) { - return List.of(); - } - var chunkPositions = new ArrayList(); // This position in the chunk is where the next overlapping chunk will start diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/DelegatingProcessor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/DelegatingProcessor.java index fc2d890dd89e6..eda3fc0f3bfdb 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/DelegatingProcessor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/DelegatingProcessor.java @@ -9,7 +9,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xpack.inference.external.response.streaming.ServerSentEvent; +import org.elasticsearch.xpack.inference.external.response.streaming.ServerSentEventField; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.Iterator; import java.util.concurrent.Flow; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -25,6 +32,33 @@ public abstract class DelegatingProcessor implements Flow.Processor private Flow.Subscriber downstream; private Flow.Subscription upstream; + public static Deque parseEvent( + Deque item, + ParseChunkFunction parseFunction, + XContentParserConfiguration parserConfig, + Logger logger + ) throws Exception { + var results = new ArrayDeque(item.size()); + for (ServerSentEvent event : item) { + if (ServerSentEventField.DATA == event.name() && event.hasValue()) { + try { + var delta = parseFunction.apply(parserConfig, event); + delta.forEachRemaining(results::offer); + } catch (Exception e) { + logger.warn("Failed to parse event from inference provider: {}", event); + throw e; + } + } + } + + return results; + } + + @FunctionalInterface + public interface ParseChunkFunction { + Iterator apply(XContentParserConfiguration parserConfig, ServerSentEvent event) throws IOException; + } + @Override public void subscribe(Flow.Subscriber subscriber) { if (downstream != null) { @@ -51,7 +85,7 @@ public void request(long n) { if (isClosed.get()) { downstream.onComplete(); } else if (upstream != null) { - upstream.request(n); + upstreamRequest(n); } else { pendingRequests.accumulateAndGet(n, Long::sum); } @@ -61,11 +95,21 @@ public void request(long n) { public void cancel() { if (isClosed.compareAndSet(false, true) && upstream != null) { upstream.cancel(); + onCancel(); } } }; } + /** + * Guaranteed to be 
called when the upstream is set and this processor has not been closed. + */ + protected void upstreamRequest(long n) { + upstream.request(n); + } + + protected void onCancel() {} + @Override public void onSubscribe(Flow.Subscription subscription) { if (upstream != null) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/SingleInputSenderExecutableAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/SingleInputSenderExecutableAction.java index 4e97554b56445..b43e5ab70e2f2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/SingleInputSenderExecutableAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/SingleInputSenderExecutableAction.java @@ -12,7 +12,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; import org.elasticsearch.xpack.inference.external.http.sender.RequestManager; import org.elasticsearch.xpack.inference.external.http.sender.Sender; @@ -34,13 +33,7 @@ public SingleInputSenderExecutableAction( @Override public void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener listener) { - if (inferenceInputs instanceof DocumentsOnlyInput == false) { - listener.onFailure(new ElasticsearchStatusException("Invalid inference input type", RestStatus.INTERNAL_SERVER_ERROR)); - return; - } - - var docsOnlyInput = (DocumentsOnlyInput) inferenceInputs; - if (docsOnlyInput.getInputs().size() > 1) { + if (inferenceInputs.inputSize() > 1) { listener.onFailure( new ElasticsearchStatusException(requestTypeForInputValidationError + " only accepts 1 input", RestStatus.BAD_REQUEST) ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java index 9c83264b5581f..bd5c53d589df0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreator.java @@ -26,7 +26,7 @@ * Provides a way to construct an {@link ExecutableAction} using the visitor pattern based on the openai model type.
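One more note on the DelegatingProcessor.parseEvent helper introduced above (its generics were flattened in this patch view; assume the signature is parseEvent(Deque<ServerSentEvent> item, ParseChunkFunction<T> parseFunction, XContentParserConfiguration parserConfig, Logger logger) returning Deque<T>). A toy caller, for illustration only:

// Collect each SSE "data:" payload verbatim. A real ParseChunkFunction, such as
// OpenAiStreamingProcessor#parse, turns the payload into parsed result chunks.
static Deque<String> collectData(Deque<ServerSentEvent> events, Logger logger) throws Exception {
    return DelegatingProcessor.parseEvent(
        events,
        (parserConfig, event) -> List.of(event.value()).iterator(),
        XContentParserConfiguration.EMPTY,
        logger
    );
}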
*/ public class OpenAiActionCreator implements OpenAiActionVisitor { - private static final String COMPLETION_ERROR_PREFIX = "OpenAI chat completions"; + public static final String COMPLETION_ERROR_PREFIX = "OpenAI chat completions"; private final Sender sender; private final ServiceComponents serviceComponents; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AlibabaCloudSearchCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AlibabaCloudSearchCompletionRequestManager.java index a0a44e62f9f73..e7a960f1316f2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AlibabaCloudSearchCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AlibabaCloudSearchCompletionRequestManager.java @@ -69,7 +69,7 @@ public void execute( Supplier hasRequestCompletedFunction, ActionListener listener ) { - List input = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + List input = inferenceInputs.castTo(ChatCompletionInput.class).getInputs(); AlibabaCloudSearchCompletionRequest request = new AlibabaCloudSearchCompletionRequest(account, input, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java index 69a5c665feb86..3929585a0745d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java @@ -44,10 +44,10 @@ public void execute( Supplier hasRequestCompletedFunction, ActionListener listener ) { - var docsOnly = DocumentsOnlyInput.of(inferenceInputs); - var docsInput = docsOnly.getInputs(); - var stream = docsOnly.stream(); - var requestEntity = AmazonBedrockChatCompletionEntityFactory.createEntity(model, docsInput); + var chatCompletionInput = inferenceInputs.castTo(ChatCompletionInput.class); + var inputs = chatCompletionInput.getInputs(); + var stream = chatCompletionInput.stream(); + var requestEntity = AmazonBedrockChatCompletionEntityFactory.createEntity(model, inputs); var request = new AmazonBedrockChatCompletionRequest(model, requestEntity, timeout, stream); var responseHandler = new AmazonBedrockChatCompletionResponseHandler(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AnthropicCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AnthropicCompletionRequestManager.java index 5418b3dd9840b..6d4aeb9e31bac 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AnthropicCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AnthropicCompletionRequestManager.java @@ -46,10 +46,10 @@ public void execute( Supplier hasRequestCompletedFunction, ActionListener listener ) { - var docsOnly = 
DocumentsOnlyInput.of(inferenceInputs); - var docsInput = docsOnly.getInputs(); - var stream = docsOnly.stream(); - AnthropicChatCompletionRequest request = new AnthropicChatCompletionRequest(docsInput, model, stream); + var chatCompletionInput = inferenceInputs.castTo(ChatCompletionInput.class); + var inputs = chatCompletionInput.getInputs(); + var stream = chatCompletionInput.stream(); + AnthropicChatCompletionRequest request = new AnthropicChatCompletionRequest(inputs, model, stream); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioChatCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioChatCompletionRequestManager.java index 21cec68b14a49..affd2e3a7760e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioChatCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioChatCompletionRequestManager.java @@ -41,10 +41,10 @@ public void execute( Supplier hasRequestCompletedFunction, ActionListener listener ) { - var docsOnly = DocumentsOnlyInput.of(inferenceInputs); - var docsInput = docsOnly.getInputs(); - var stream = docsOnly.stream(); - AzureAiStudioChatCompletionRequest request = new AzureAiStudioChatCompletionRequest(model, docsInput, stream); + var chatCompletionInput = inferenceInputs.castTo(ChatCompletionInput.class); + var inputs = chatCompletionInput.getInputs(); + var stream = chatCompletionInput.stream(); + AzureAiStudioChatCompletionRequest request = new AzureAiStudioChatCompletionRequest(model, inputs, stream); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiCompletionRequestManager.java index d036559ec3dcb..c2f5f3e9db5ed 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiCompletionRequestManager.java @@ -46,10 +46,10 @@ public void execute( Supplier hasRequestCompletedFunction, ActionListener listener ) { - var docsOnly = DocumentsOnlyInput.of(inferenceInputs); - var docsInput = docsOnly.getInputs(); - var stream = docsOnly.stream(); - AzureOpenAiCompletionRequest request = new AzureOpenAiCompletionRequest(docsInput, model, stream); + var chatCompletionInput = inferenceInputs.castTo(ChatCompletionInput.class); + var inputs = chatCompletionInput.getInputs(); + var stream = chatCompletionInput.stream(); + AzureOpenAiCompletionRequest request = new AzureOpenAiCompletionRequest(inputs, model, stream); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ChatCompletionInput.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ChatCompletionInput.java new file mode 100644 index 0000000000000..928da95d9c2f0 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ChatCompletionInput.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import java.util.List; +import java.util.Objects; + +/** + * This class encapsulates the input text passed by the request and indicates whether the response should be streamed. + * The main difference between this class and {@link UnifiedChatInput} is this should only be used for + * {@link org.elasticsearch.inference.TaskType#COMPLETION} originating through the + * {@link org.elasticsearch.inference.InferenceService#infer} code path. These are requests sent to the + * API without using the _unified route. + */ +public class ChatCompletionInput extends InferenceInputs { + private final List input; + + public ChatCompletionInput(List input) { + this(input, false); + } + + public ChatCompletionInput(List input, boolean stream) { + super(stream); + this.input = Objects.requireNonNull(input); + } + + public List getInputs() { + return this.input; + } + + public int inputSize() { + return input.size(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereCompletionRequestManager.java index ae46fbe0fef87..40cd03c87664e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereCompletionRequestManager.java @@ -50,10 +50,10 @@ public void execute( Supplier hasRequestCompletedFunction, ActionListener listener ) { - var docsOnly = DocumentsOnlyInput.of(inferenceInputs); - var docsInput = docsOnly.getInputs(); - var stream = docsOnly.stream(); - CohereCompletionRequest request = new CohereCompletionRequest(docsInput, model, stream); + var chatCompletionInput = inferenceInputs.castTo(ChatCompletionInput.class); + var inputs = chatCompletionInput.getInputs(); + var stream = chatCompletionInput.stream(); + CohereCompletionRequest request = new CohereCompletionRequest(inputs, model, stream); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/DocumentsOnlyInput.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/DocumentsOnlyInput.java index 8cf411d84c932..3feb79d3de6cc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/DocumentsOnlyInput.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/DocumentsOnlyInput.java @@ -14,30 +14,28 @@ public class DocumentsOnlyInput extends InferenceInputs { public static DocumentsOnlyInput of(InferenceInputs 
inferenceInputs) { if (inferenceInputs instanceof DocumentsOnlyInput == false) { - throw createUnsupportedTypeException(inferenceInputs); + throw createUnsupportedTypeException(inferenceInputs, DocumentsOnlyInput.class); } return (DocumentsOnlyInput) inferenceInputs; } private final List input; - private final boolean stream; public DocumentsOnlyInput(List input) { this(input, false); } public DocumentsOnlyInput(List input, boolean stream) { - super(); + super(stream); this.input = Objects.requireNonNull(input); - this.stream = stream; } public List getInputs() { return this.input; } - public boolean stream() { - return stream; + public int inputSize() { + return input.size(); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioCompletionRequestManager.java index abe50c6fae3f9..0097f9c08ea21 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioCompletionRequestManager.java @@ -51,7 +51,10 @@ public void execute( Supplier hasRequestCompletedFunction, ActionListener listener ) { - GoogleAiStudioCompletionRequest request = new GoogleAiStudioCompletionRequest(DocumentsOnlyInput.of(inferenceInputs), model); + GoogleAiStudioCompletionRequest request = new GoogleAiStudioCompletionRequest( + inferenceInputs.castTo(ChatCompletionInput.class), + model + ); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputs.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputs.java index dd241857ef0c4..e85ea6f1d9b35 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputs.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputs.java @@ -10,7 +10,29 @@ import org.elasticsearch.common.Strings; public abstract class InferenceInputs { - public static IllegalArgumentException createUnsupportedTypeException(InferenceInputs inferenceInputs) { - return new IllegalArgumentException(Strings.format("Unsupported inference inputs type: [%s]", inferenceInputs.getClass())); + private final boolean stream; + + public InferenceInputs(boolean stream) { + this.stream = stream; + } + + public static IllegalArgumentException createUnsupportedTypeException(InferenceInputs inferenceInputs, Class clazz) { + return new IllegalArgumentException( + Strings.format("Unable to convert inference inputs type: [%s] to [%s]", inferenceInputs.getClass(), clazz) + ); } + + public T castTo(Class clazz) { + if (clazz.isInstance(this) == false) { + throw createUnsupportedTypeException(this, clazz); + } + + return clazz.cast(this); + } + + public boolean stream() { + return stream; + } + + public abstract int inputSize(); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionRequestManager.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionRequestManager.java index cea89332e5bf0..4d730be6aa6bd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionRequestManager.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; import org.elasticsearch.xpack.inference.external.openai.OpenAiChatCompletionResponseHandler; -import org.elasticsearch.xpack.inference.external.request.openai.OpenAiChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.request.openai.OpenAiUnifiedChatCompletionRequest; import org.elasticsearch.xpack.inference.external.response.openai.OpenAiChatCompletionResponseEntity; import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; @@ -25,8 +25,8 @@ public class OpenAiCompletionRequestManager extends OpenAiRequestManager { private static final Logger logger = LogManager.getLogger(OpenAiCompletionRequestManager.class); - private static final ResponseHandler HANDLER = createCompletionHandler(); + static final String USER_ROLE = "user"; public static OpenAiCompletionRequestManager of(OpenAiChatCompletionModel model, ThreadPool threadPool) { return new OpenAiCompletionRequestManager(Objects.requireNonNull(model), Objects.requireNonNull(threadPool)); @@ -35,7 +35,7 @@ public static OpenAiCompletionRequestManager of(OpenAiChatCompletionModel model, private final OpenAiChatCompletionModel model; private OpenAiCompletionRequestManager(OpenAiChatCompletionModel model, ThreadPool threadPool) { - super(threadPool, model, OpenAiChatCompletionRequest::buildDefaultUri); + super(threadPool, model, OpenAiUnifiedChatCompletionRequest::buildDefaultUri); this.model = Objects.requireNonNull(model); } @@ -46,10 +46,8 @@ public void execute( Supplier hasRequestCompletedFunction, ActionListener listener ) { - var docsOnly = DocumentsOnlyInput.of(inferenceInputs); - var docsInput = docsOnly.getInputs(); - var stream = docsOnly.stream(); - OpenAiChatCompletionRequest request = new OpenAiChatCompletionRequest(docsInput, model, stream); + var chatCompletionInputs = inferenceInputs.castTo(ChatCompletionInput.class); + var request = new OpenAiUnifiedChatCompletionRequest(new UnifiedChatInput(chatCompletionInputs, USER_ROLE), model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiUnifiedCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiUnifiedCompletionRequestManager.java new file mode 100644 index 0000000000000..3b0f770e3e061 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiUnifiedCompletionRequestManager.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
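The recurring edit across these request managers replaces DocumentsOnlyInput.of(...) with the castTo helper now defined on InferenceInputs. In sketch form (the generic parameters are assumptions, since they were flattened in this view):

// One-step validate-and-downcast; a mismatch throws IllegalArgumentException
// with the message "Unable to convert inference inputs type: [...] to [...]".
ChatCompletionInput chat = inferenceInputs.castTo(ChatCompletionInput.class);
List<String> inputs = chat.getInputs();
boolean stream = chat.stream(); // the streaming flag now lives on the InferenceInputs base class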
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.openai.OpenAiUnifiedChatCompletionResponseHandler; +import org.elasticsearch.xpack.inference.external.request.openai.OpenAiUnifiedChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.response.openai.OpenAiChatCompletionResponseEntity; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; + +import java.util.Objects; +import java.util.function.Supplier; + +public class OpenAiUnifiedCompletionRequestManager extends OpenAiRequestManager { + + private static final Logger logger = LogManager.getLogger(OpenAiUnifiedCompletionRequestManager.class); + + private static final ResponseHandler HANDLER = createCompletionHandler(); + + public static OpenAiUnifiedCompletionRequestManager of(OpenAiChatCompletionModel model, ThreadPool threadPool) { + return new OpenAiUnifiedCompletionRequestManager(Objects.requireNonNull(model), Objects.requireNonNull(threadPool)); + } + + private final OpenAiChatCompletionModel model; + + private OpenAiUnifiedCompletionRequestManager(OpenAiChatCompletionModel model, ThreadPool threadPool) { + super(threadPool, model, OpenAiUnifiedChatCompletionRequest::buildDefaultUri); + this.model = Objects.requireNonNull(model); + } + + @Override + public void execute( + InferenceInputs inferenceInputs, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + ActionListener listener + ) { + + OpenAiUnifiedChatCompletionRequest request = new OpenAiUnifiedChatCompletionRequest( + inferenceInputs.castTo(UnifiedChatInput.class), + model + ); + + execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); + } + + private static ResponseHandler createCompletionHandler() { + return new OpenAiUnifiedChatCompletionResponseHandler("openai completion", OpenAiChatCompletionResponseEntity::fromResponse); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/QueryAndDocsInputs.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/QueryAndDocsInputs.java index 50bb77b307db3..5af5245ac5b40 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/QueryAndDocsInputs.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/QueryAndDocsInputs.java @@ -14,7 +14,7 @@ public class QueryAndDocsInputs extends InferenceInputs { public static QueryAndDocsInputs of(InferenceInputs inferenceInputs) { if (inferenceInputs instanceof QueryAndDocsInputs == false) { - throw createUnsupportedTypeException(inferenceInputs); + throw createUnsupportedTypeException(inferenceInputs, QueryAndDocsInputs.class); } return (QueryAndDocsInputs) inferenceInputs; @@ -22,17 +22,15 @@ public static QueryAndDocsInputs of(InferenceInputs inferenceInputs) { private final String query; private final List chunks; - private final boolean stream; public QueryAndDocsInputs(String query, List 
chunks) { this(query, chunks, false); } public QueryAndDocsInputs(String query, List chunks, boolean stream) { - super(); + super(stream); this.query = Objects.requireNonNull(query); this.chunks = Objects.requireNonNull(chunks); - this.stream = stream; } public String getQuery() { @@ -43,8 +41,7 @@ public List getChunks() { return chunks; } - public boolean stream() { - return stream; + public int inputSize() { + return chunks.size(); } - } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/UnifiedChatInput.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/UnifiedChatInput.java new file mode 100644 index 0000000000000..f89fa1ee37a6f --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/UnifiedChatInput.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.UnifiedCompletionRequest; + +import java.util.List; +import java.util.Objects; + +/** + * This class encapsulates the unified request. + * The main difference between this class and {@link ChatCompletionInput} is this should only be used for + * {@link org.elasticsearch.inference.TaskType#COMPLETION} originating through the + * {@link org.elasticsearch.inference.InferenceService#unifiedCompletionInfer(Model, UnifiedCompletionRequest, TimeValue, ActionListener)} + * code path. These are requests sent to the API with the _unified route. 
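To make the adapter constructors above concrete, a short usage sketch (the five-argument Message shape is taken from convertToMessages below, which fills the last three fields with null):

// Wrap plain completion inputs as unified chat messages with a fixed role.
var unified = new UnifiedChatInput(List.of("Hello", "world"), "user", true);
assert unified.inputSize() == 2;  // one Message per input string
assert unified.stream();          // streaming flag carried on the base class
UnifiedCompletionRequest request = unified.getRequest();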
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiStreamingProcessor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiStreamingProcessor.java
index 6e006fe255956..48c8132035b50 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiStreamingProcessor.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiStreamingProcessor.java
@@ -18,10 +18,8 @@
 import org.elasticsearch.xpack.core.inference.results.StreamingChatCompletionResults;
 import org.elasticsearch.xpack.inference.common.DelegatingProcessor;
 import org.elasticsearch.xpack.inference.external.response.streaming.ServerSentEvent;
-import org.elasticsearch.xpack.inference.external.response.streaming.ServerSentEventField;
 
 import java.io.IOException;
-import java.util.ArrayDeque;
 import java.util.Collections;
 import java.util.Deque;
 import java.util.Iterator;
@@ -115,19 +113,7 @@ public class OpenAiStreamingProcessor extends DelegatingProcessor<Deque<ServerSentEvent>, StreamingChatCompletionResults> {
     protected void next(Deque<ServerSentEvent> item) throws Exception {
         var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE);
-
-        var results = new ArrayDeque<StreamingChatCompletionResults.Result>(item.size());
-        for (ServerSentEvent event : item) {
-            if (ServerSentEventField.DATA == event.name() && event.hasValue()) {
-                try {
-                    var delta = parse(parserConfig, event);
-                    delta.forEachRemaining(results::offer);
-                } catch (Exception e) {
-                    log.warn("Failed to parse event from inference provider: {}", event);
-                    throw e;
-                }
-            }
-        }
+        var results = parseEvent(item, OpenAiStreamingProcessor::parse, parserConfig, log);
 
         if (results.isEmpty()) {
             upstream().request(1);
@@ -136,7 +122,7 @@ protected void next(Deque<ServerSentEvent> item) throws Exception {
         }
     }
 
-    private Iterator<StreamingChatCompletionResults.Result> parse(XContentParserConfiguration parserConfig, ServerSentEvent event)
+    private static Iterator<StreamingChatCompletionResults.Result> parse(XContentParserConfiguration parserConfig, ServerSentEvent event)
         throws IOException {
         if (DONE_MESSAGE.equalsIgnoreCase(event.value())) {
             return Collections.emptyIterator();
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiUnifiedChatCompletionResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiUnifiedChatCompletionResponseHandler.java
new file mode 100644
index 0000000000000..fce2556efc5e0
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiUnifiedChatCompletionResponseHandler.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.external.openai;
+
+import org.elasticsearch.inference.InferenceServiceResults;
+import org.elasticsearch.xpack.core.inference.results.StreamingUnifiedChatCompletionResults;
+import org.elasticsearch.xpack.inference.external.http.HttpResult;
+import org.elasticsearch.xpack.inference.external.http.retry.ResponseParser;
+import org.elasticsearch.xpack.inference.external.request.Request;
+import org.elasticsearch.xpack.inference.external.response.streaming.ServerSentEventParser;
+import org.elasticsearch.xpack.inference.external.response.streaming.ServerSentEventProcessor;
+
+import java.util.concurrent.Flow;
+
+public class OpenAiUnifiedChatCompletionResponseHandler extends OpenAiChatCompletionResponseHandler {
+    public OpenAiUnifiedChatCompletionResponseHandler(String requestType, ResponseParser parseFunction) {
+        super(requestType, parseFunction);
+    }
+
+    @Override
+    public InferenceServiceResults parseResult(Request request, Flow.Publisher<HttpResult> flow) {
+        var serverSentEventProcessor = new ServerSentEventProcessor(new ServerSentEventParser());
+        var openAiProcessor = new OpenAiUnifiedStreamingProcessor();
+
+        flow.subscribe(serverSentEventProcessor);
+        serverSentEventProcessor.subscribe(openAiProcessor);
+        return new StreamingUnifiedChatCompletionResults(openAiProcessor);
+    }
+}
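A hedged sketch of the subscription chain that parseResult wires up, assuming some httpResultPublisher (a Flow.Publisher of HttpResult chunks, name invented) supplied by the HTTP layer:

    // bytes -> server-sent events -> ChatCompletionChunk results
    var sseProcessor = new ServerSentEventProcessor(new ServerSentEventParser());
    var chunkProcessor = new OpenAiUnifiedStreamingProcessor();
    httpResultPublisher.subscribe(sseProcessor);
    sseProcessor.subscribe(chunkProcessor);
    InferenceServiceResults results = new StreamingUnifiedChatCompletionResults(chunkProcessor);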
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiUnifiedStreamingProcessor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiUnifiedStreamingProcessor.java
new file mode 100644
index 0000000000000..599d71df3dcfa
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/openai/OpenAiUnifiedStreamingProcessor.java
@@ -0,0 +1,287 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.external.openai;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.xcontent.ChunkedToXContent;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.XContentFactory;
+import org.elasticsearch.xcontent.XContentParser;
+import org.elasticsearch.xcontent.XContentParserConfiguration;
+import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xpack.core.inference.results.StreamingUnifiedChatCompletionResults;
+import org.elasticsearch.xpack.inference.common.DelegatingProcessor;
+import org.elasticsearch.xpack.inference.external.response.streaming.ServerSentEvent;
+
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.LinkedBlockingDeque;
+
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+import static org.elasticsearch.xpack.inference.external.response.XContentUtils.moveToFirstToken;
+
+public class OpenAiUnifiedStreamingProcessor extends DelegatingProcessor<Deque<ServerSentEvent>, ChunkedToXContent> {
+    public static final String FUNCTION_FIELD = "function";
+    private static final Logger logger = LogManager.getLogger(OpenAiUnifiedStreamingProcessor.class);
+
+    private static final String CHOICES_FIELD = "choices";
+    private static final String DELTA_FIELD = "delta";
+    private static final String CONTENT_FIELD = "content";
+    private static final String DONE_MESSAGE = "[done]";
+    private static final String REFUSAL_FIELD = "refusal";
+    private static final String TOOL_CALLS_FIELD = "tool_calls";
+    public static final String ROLE_FIELD = "role";
+    public static final String FINISH_REASON_FIELD = "finish_reason";
+    public static final String INDEX_FIELD = "index";
+    public static final String OBJECT_FIELD = "object";
+    public static final String MODEL_FIELD = "model";
+    public static final String ID_FIELD = "id";
+    public static final String CHOICE_FIELD = "choice";
+    public static final String USAGE_FIELD = "usage";
+    public static final String TYPE_FIELD = "type";
+    public static final String NAME_FIELD = "name";
+    public static final String ARGUMENTS_FIELD = "arguments";
+    public static final String COMPLETION_TOKENS_FIELD = "completion_tokens";
+    public static final String PROMPT_TOKENS_FIELD = "prompt_tokens";
+    public static final String TOTAL_TOKENS_FIELD = "total_tokens";
+
+    private final Deque<StreamingUnifiedChatCompletionResults.ChatCompletionChunk> buffer = new LinkedBlockingDeque<>();
+
+    @Override
+    protected void upstreamRequest(long n) {
+        if (buffer.isEmpty()) {
+            super.upstreamRequest(n);
+        } else {
+            downstream().onNext(new StreamingUnifiedChatCompletionResults.Results(singleItem(buffer.poll())));
+        }
+    }
+
+    @Override
+    protected void next(Deque<ServerSentEvent> item) throws Exception {
+        var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE);
+        var results = parseEvent(item, OpenAiUnifiedStreamingProcessor::parse, parserConfig, logger);
+
+        if (results.isEmpty()) {
+            upstream().request(1);
+        } else if (results.size() == 1) {
+            downstream().onNext(new StreamingUnifiedChatCompletionResults.Results(results));
+        } else {
+            // results > 1, but openai spec only wants 1 chunk per SSE event
+            var firstItem = singleItem(results.poll());
+            while (results.isEmpty() == false) {
+                buffer.offer(results.poll());
+            }
+            downstream().onNext(new StreamingUnifiedChatCompletionResults.Results(firstItem));
+        }
+    }
+
+    private static Iterator<StreamingUnifiedChatCompletionResults.ChatCompletionChunk> parse(
+        XContentParserConfiguration parserConfig,
+        ServerSentEvent event
+    ) throws IOException {
+        if (DONE_MESSAGE.equalsIgnoreCase(event.value())) {
+            return Collections.emptyIterator();
+        }
+
+        try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, event.value())) {
+            moveToFirstToken(jsonParser);
+
+            XContentParser.Token token = jsonParser.currentToken();
+            ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser);
+
+            StreamingUnifiedChatCompletionResults.ChatCompletionChunk chunk = ChatCompletionChunkParser.parse(jsonParser);
+
+            return Collections.singleton(chunk).iterator();
+        }
+    }
+
+    public static class ChatCompletionChunkParser {
+        @SuppressWarnings("unchecked")
+        private static final ConstructingObjectParser<StreamingUnifiedChatCompletionResults.ChatCompletionChunk, Void> PARSER =
+            new ConstructingObjectParser<>(
+                "chat_completion_chunk",
+                true,
+                args -> new StreamingUnifiedChatCompletionResults.ChatCompletionChunk(
+                    (String) args[0],
+                    (List<StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice>) args[1],
+                    (String) args[2],
+                    (String) args[3],
+                    (StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Usage) args[4]
+                )
+            );
+
+        static {
+            PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField(ID_FIELD));
+            PARSER.declareObjectArray(
+                ConstructingObjectParser.constructorArg(),
+                (p, c) -> ChatCompletionChunkParser.ChoiceParser.parse(p),
+                new ParseField(CHOICES_FIELD)
+            );
+            PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField(MODEL_FIELD));
+            PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField(OBJECT_FIELD));
+            PARSER.declareObjectOrNull(
+                ConstructingObjectParser.optionalConstructorArg(),
+                (p, c) -> ChatCompletionChunkParser.UsageParser.parse(p),
+                null,
+                new ParseField(USAGE_FIELD)
+            );
+        }
+
+        public static StreamingUnifiedChatCompletionResults.ChatCompletionChunk parse(XContentParser parser) throws IOException {
+            return PARSER.parse(parser, null);
+        }
+
+        private static class ChoiceParser {
+            private static final ConstructingObjectParser<StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice, Void> PARSER =
+                new ConstructingObjectParser<>(
+                    CHOICE_FIELD,
+                    true,
+                    args -> new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice(
+                        (StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta) args[0],
+                        (String) args[1],
+                        (int) args[2]
+                    )
+                );
+
+            static {
+                PARSER.declareObject(
+                    ConstructingObjectParser.constructorArg(),
+                    (p, c) -> ChatCompletionChunkParser.DeltaParser.parse(p),
+                    new ParseField(DELTA_FIELD)
+                );
+                PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), new ParseField(FINISH_REASON_FIELD));
+                PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField(INDEX_FIELD));
+            }
+
+            public static StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice parse(XContentParser parser) {
+                return PARSER.apply(parser, null);
+            }
+        }
+
+        private static class DeltaParser {
+            @SuppressWarnings("unchecked")
+            private static final ConstructingObjectParser<
+                StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta,
+                Void> PARSER = new ConstructingObjectParser<>(
+                    DELTA_FIELD,
+                    true,
+                    args -> new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta(
+                        (String) args[0],
+                        (String) args[1],
+                        (String) args[2],
+                        (List<StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall>) args[3]
+                    )
+                );
+
+            static {
+                PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), new ParseField(CONTENT_FIELD));
+                PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), new ParseField(REFUSAL_FIELD));
+                PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField(ROLE_FIELD));
+                PARSER.declareObjectArray(
+                    ConstructingObjectParser.optionalConstructorArg(),
+                    (p, c) -> ChatCompletionChunkParser.ToolCallParser.parse(p),
+                    new ParseField(TOOL_CALLS_FIELD)
+                );
+            }
+
+            public static StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta parse(XContentParser parser)
+                throws IOException {
+                return PARSER.parse(parser, null);
+            }
+        }
+
+        private static class ToolCallParser {
+            private static final ConstructingObjectParser<
+                StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall,
+                Void> PARSER = new ConstructingObjectParser<>(
+                    "tool_call",
+                    true,
+                    args -> new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall(
+                        (int) args[0],
+                        (String) args[1],
+                        (StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall.Function) args[2],
+                        (String) args[3]
+                    )
+                );
+
+            static {
+                PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField(INDEX_FIELD));
+                PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField(ID_FIELD));
+                PARSER.declareObject(
+                    ConstructingObjectParser.optionalConstructorArg(),
+                    (p, c) -> ChatCompletionChunkParser.FunctionParser.parse(p),
+                    new ParseField(FUNCTION_FIELD)
+                );
+                PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField(TYPE_FIELD));
+            }
+
+            public static StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall parse(XContentParser parser)
+                throws IOException {
+                return PARSER.parse(parser, null);
+            }
+        }
+
+        private static class FunctionParser {
+            private static final ConstructingObjectParser<
+                StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall.Function,
+                Void> PARSER = new ConstructingObjectParser<>(
+                    FUNCTION_FIELD,
+                    true,
+                    args -> new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall.Function(
+                        (String) args[0],
+                        (String) args[1]
+                    )
+                );
+
+            static {
+                PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField(ARGUMENTS_FIELD));
+                PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField(NAME_FIELD));
+            }
+
+            public static StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall.Function parse(
+                XContentParser parser
+            ) throws IOException {
+                return PARSER.parse(parser, null);
+            }
+        }
+
+        private static class UsageParser {
+            private static final ConstructingObjectParser<StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Usage, Void> PARSER =
+                new ConstructingObjectParser<>(
+                    USAGE_FIELD,
+                    true,
+                    args -> new StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Usage((int) args[0], (int) args[1], (int) args[2])
+                );
+
+            static {
+                PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField(COMPLETION_TOKENS_FIELD));
+                PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField(PROMPT_TOKENS_FIELD));
+                PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField(TOTAL_TOKENS_FIELD));
+            }
+
+            public static StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Usage parse(XContentParser parser) throws IOException {
+                return PARSER.parse(parser, null);
+            }
+        }
+    }
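To make the parser structure concrete, the payload below is the kind of event value ChatCompletionChunkParser.parse accepts (a hand-written example modeled on the field constants above, not taken from this change; usage is optional and omitted here):

    // Illustrative SSE data payload for one chunk, held in a String named json:
    // {"id":"chunk-1","object":"chat.completion.chunk","model":"gpt-4o",
    //  "choices":[{"index":0,"delta":{"role":"assistant","content":"Hello"},"finish_reason":null}]}
    try (XContentParser p = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, json)) {
        var chunk = ChatCompletionChunkParser.parse(p);
        // chunk now mirrors the JSON above; unknown fields are ignored
        // because each parser is constructed with ignoreUnknownFields = true.
    }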
+
+    private Deque<StreamingUnifiedChatCompletionResults.ChatCompletionChunk> singleItem(
+        StreamingUnifiedChatCompletionResults.ChatCompletionChunk result
+    ) {
+        var deque = new ArrayDeque<StreamingUnifiedChatCompletionResults.ChatCompletionChunk>(1);
+        deque.offer(result);
+        return deque;
+    }
+}
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googleaistudio/GoogleAiStudioCompletionRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googleaistudio/GoogleAiStudioCompletionRequest.java
index 80770d63ef139..b1af18d03dda4 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googleaistudio/GoogleAiStudioCompletionRequest.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googleaistudio/GoogleAiStudioCompletionRequest.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.common.ValidationException;
 import org.elasticsearch.common.util.LazyInitializable;
 import org.elasticsearch.xcontent.XContentType;
-import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput;
+import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput;
 import org.elasticsearch.xpack.inference.external.request.HttpRequest;
 import org.elasticsearch.xpack.inference.external.request.Request;
 import org.elasticsearch.xpack.inference.services.googleaistudio.completion.GoogleAiStudioCompletionModel;
@@ -27,13 +27,13 @@ public class GoogleAiStudioCompletionRequest implements GoogleAiStudioRequest {
     private static final String ALT_PARAM = "alt";
     private static final String SSE_VALUE = "sse";
 
-    private final DocumentsOnlyInput input;
+    private final ChatCompletionInput input;
 
     private final LazyInitializable<URI, RuntimeException> uri;
 
     private final GoogleAiStudioCompletionModel model;
 
-    public GoogleAiStudioCompletionRequest(DocumentsOnlyInput input, GoogleAiStudioCompletionModel model) {
+    public GoogleAiStudioCompletionRequest(ChatCompletionInput input, GoogleAiStudioCompletionModel model) {
         this.input = Objects.requireNonNull(input);
         this.model = Objects.requireNonNull(model);
         this.uri = new LazyInitializable<>(() -> model.uri(input.stream()));
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntity.java
deleted file mode 100644
index 867a7ca80cbcb..0000000000000
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntity.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */ - -package org.elasticsearch.xpack.inference.external.request.openai; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.List; -import java.util.Objects; - -public class OpenAiChatCompletionRequestEntity implements ToXContentObject { - - private static final String MESSAGES_FIELD = "messages"; - private static final String MODEL_FIELD = "model"; - - private static final String NUMBER_OF_RETURNED_CHOICES_FIELD = "n"; - - private static final String ROLE_FIELD = "role"; - private static final String USER_FIELD = "user"; - private static final String CONTENT_FIELD = "content"; - private static final String STREAM_FIELD = "stream"; - - private final List messages; - private final String model; - - private final String user; - private final boolean stream; - - public OpenAiChatCompletionRequestEntity(List messages, String model, String user, boolean stream) { - Objects.requireNonNull(messages); - Objects.requireNonNull(model); - - this.messages = messages; - this.model = model; - this.user = user; - this.stream = stream; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.startArray(MESSAGES_FIELD); - { - for (String message : messages) { - builder.startObject(); - - { - builder.field(ROLE_FIELD, USER_FIELD); - builder.field(CONTENT_FIELD, message); - } - - builder.endObject(); - } - } - builder.endArray(); - - builder.field(MODEL_FIELD, model); - builder.field(NUMBER_OF_RETURNED_CHOICES_FIELD, 1); - - if (Strings.isNullOrEmpty(user) == false) { - builder.field(USER_FIELD, user); - } - - if (stream) { - builder.field(STREAM_FIELD, true); - } - - builder.endObject(); - - return builder; - } -} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequest.java similarity index 80% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequest.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequest.java index 99a025e70d003..2e6bdb748fd33 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequest.java @@ -13,6 +13,7 @@ import org.apache.http.entity.ByteArrayEntity; import org.elasticsearch.common.Strings; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.external.openai.OpenAiAccount; import org.elasticsearch.xpack.inference.external.request.HttpRequest; import org.elasticsearch.xpack.inference.external.request.Request; @@ -21,24 +22,21 @@ import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; -import java.util.List; import java.util.Objects; import static org.elasticsearch.xpack.inference.external.request.RequestUtils.createAuthBearerHeader; import static 
org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.createOrgHeader; -public class OpenAiChatCompletionRequest implements OpenAiRequest { +public class OpenAiUnifiedChatCompletionRequest implements OpenAiRequest { private final OpenAiAccount account; - private final List input; private final OpenAiChatCompletionModel model; - private final boolean stream; + private final UnifiedChatInput unifiedChatInput; - public OpenAiChatCompletionRequest(List input, OpenAiChatCompletionModel model, boolean stream) { - this.account = OpenAiAccount.of(model, OpenAiChatCompletionRequest::buildDefaultUri); - this.input = Objects.requireNonNull(input); + public OpenAiUnifiedChatCompletionRequest(UnifiedChatInput unifiedChatInput, OpenAiChatCompletionModel model) { + this.account = OpenAiAccount.of(model, OpenAiUnifiedChatCompletionRequest::buildDefaultUri); + this.unifiedChatInput = Objects.requireNonNull(unifiedChatInput); this.model = Objects.requireNonNull(model); - this.stream = stream; } @Override @@ -46,9 +44,7 @@ public HttpRequest createHttpRequest() { HttpPost httpPost = new HttpPost(account.uri()); ByteArrayEntity byteEntity = new ByteArrayEntity( - Strings.toString( - new OpenAiChatCompletionRequestEntity(input, model.getServiceSettings().modelId(), model.getTaskSettings().user(), stream) - ).getBytes(StandardCharsets.UTF_8) + Strings.toString(new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInput, model)).getBytes(StandardCharsets.UTF_8) ); httpPost.setEntity(byteEntity); @@ -87,7 +83,7 @@ public String getInferenceEntityId() { @Override public boolean isStreaming() { - return stream; + return unifiedChatInput.stream(); } public static URI buildDefaultUri() throws URISyntaxException { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequestEntity.java new file mode 100644 index 0000000000000..5b7b274f2351b --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequestEntity.java @@ -0,0 +1,188 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.inference.UnifiedCompletionRequest; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; + +import java.io.IOException; +import java.util.Objects; + +public class OpenAiUnifiedChatCompletionRequestEntity implements ToXContentObject { + + public static final String NAME_FIELD = "name"; + public static final String TOOL_CALL_ID_FIELD = "tool_call_id"; + public static final String TOOL_CALLS_FIELD = "tool_calls"; + public static final String ID_FIELD = "id"; + public static final String FUNCTION_FIELD = "function"; + public static final String ARGUMENTS_FIELD = "arguments"; + public static final String DESCRIPTION_FIELD = "description"; + public static final String PARAMETERS_FIELD = "parameters"; + public static final String STRICT_FIELD = "strict"; + public static final String TOP_P_FIELD = "top_p"; + public static final String USER_FIELD = "user"; + public static final String STREAM_FIELD = "stream"; + private static final String NUMBER_OF_RETURNED_CHOICES_FIELD = "n"; + private static final String MODEL_FIELD = "model"; + public static final String MESSAGES_FIELD = "messages"; + private static final String ROLE_FIELD = "role"; + private static final String CONTENT_FIELD = "content"; + private static final String MAX_COMPLETION_TOKENS_FIELD = "max_completion_tokens"; + private static final String STOP_FIELD = "stop"; + private static final String TEMPERATURE_FIELD = "temperature"; + private static final String TOOL_CHOICE_FIELD = "tool_choice"; + private static final String TOOL_FIELD = "tools"; + private static final String TEXT_FIELD = "text"; + private static final String TYPE_FIELD = "type"; + private static final String STREAM_OPTIONS_FIELD = "stream_options"; + private static final String INCLUDE_USAGE_FIELD = "include_usage"; + + private final UnifiedCompletionRequest unifiedRequest; + private final boolean stream; + private final OpenAiChatCompletionModel model; + + public OpenAiUnifiedChatCompletionRequestEntity(UnifiedChatInput unifiedChatInput, OpenAiChatCompletionModel model) { + Objects.requireNonNull(unifiedChatInput); + + this.unifiedRequest = unifiedChatInput.getRequest(); + this.stream = unifiedChatInput.stream(); + this.model = Objects.requireNonNull(model); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray(MESSAGES_FIELD); + { + for (UnifiedCompletionRequest.Message message : unifiedRequest.messages()) { + builder.startObject(); + { + if (message.content() instanceof UnifiedCompletionRequest.ContentString contentString) { + builder.field(CONTENT_FIELD, contentString.content()); + } else if (message.content() instanceof UnifiedCompletionRequest.ContentObjects contentObjects) { + builder.startArray(CONTENT_FIELD); + for (UnifiedCompletionRequest.ContentObject contentObject : contentObjects.contentObjects()) { + builder.startObject(); + builder.field(TEXT_FIELD, contentObject.text()); + builder.field(TYPE_FIELD, contentObject.type()); + builder.endObject(); + } + builder.endArray(); + } else { + throw new IllegalArgumentException( + Strings.format("Unsupported message.content class received: %s", 
message.content().getClass().getSimpleName()) + ); + } + + builder.field(ROLE_FIELD, message.role()); + if (message.name() != null) { + builder.field(NAME_FIELD, message.name()); + } + if (message.toolCallId() != null) { + builder.field(TOOL_CALL_ID_FIELD, message.toolCallId()); + } + if (message.toolCalls() != null) { + builder.startArray(TOOL_CALLS_FIELD); + for (UnifiedCompletionRequest.ToolCall toolCall : message.toolCalls()) { + builder.startObject(); + { + builder.field(ID_FIELD, toolCall.id()); + builder.startObject(FUNCTION_FIELD); + { + builder.field(ARGUMENTS_FIELD, toolCall.function().arguments()); + builder.field(NAME_FIELD, toolCall.function().name()); + } + builder.endObject(); + builder.field(TYPE_FIELD, toolCall.type()); + } + builder.endObject(); + } + builder.endArray(); + } + } + builder.endObject(); + } + } + builder.endArray(); + + builder.field(MODEL_FIELD, model.getServiceSettings().modelId()); + if (unifiedRequest.maxCompletionTokens() != null) { + builder.field(MAX_COMPLETION_TOKENS_FIELD, unifiedRequest.maxCompletionTokens()); + } + + builder.field(NUMBER_OF_RETURNED_CHOICES_FIELD, 1); + + if (unifiedRequest.stop() != null && unifiedRequest.stop().isEmpty() == false) { + builder.field(STOP_FIELD, unifiedRequest.stop()); + } + if (unifiedRequest.temperature() != null) { + builder.field(TEMPERATURE_FIELD, unifiedRequest.temperature()); + } + if (unifiedRequest.toolChoice() != null) { + if (unifiedRequest.toolChoice() instanceof UnifiedCompletionRequest.ToolChoiceString) { + builder.field(TOOL_CHOICE_FIELD, ((UnifiedCompletionRequest.ToolChoiceString) unifiedRequest.toolChoice()).value()); + } else if (unifiedRequest.toolChoice() instanceof UnifiedCompletionRequest.ToolChoiceObject) { + builder.startObject(TOOL_CHOICE_FIELD); + { + builder.field(TYPE_FIELD, ((UnifiedCompletionRequest.ToolChoiceObject) unifiedRequest.toolChoice()).type()); + builder.startObject(FUNCTION_FIELD); + { + builder.field( + NAME_FIELD, + ((UnifiedCompletionRequest.ToolChoiceObject) unifiedRequest.toolChoice()).function().name() + ); + } + builder.endObject(); + } + builder.endObject(); + } + } + if (unifiedRequest.tools() != null && unifiedRequest.tools().isEmpty() == false) { + builder.startArray(TOOL_FIELD); + for (UnifiedCompletionRequest.Tool t : unifiedRequest.tools()) { + builder.startObject(); + { + builder.field(TYPE_FIELD, t.type()); + builder.startObject(FUNCTION_FIELD); + { + builder.field(DESCRIPTION_FIELD, t.function().description()); + builder.field(NAME_FIELD, t.function().name()); + builder.field(PARAMETERS_FIELD, t.function().parameters()); + if (t.function().strict() != null) { + builder.field(STRICT_FIELD, t.function().strict()); + } + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + if (unifiedRequest.topP() != null) { + builder.field(TOP_P_FIELD, unifiedRequest.topP()); + } + + if (Strings.isNullOrEmpty(model.getTaskSettings().user()) == false) { + builder.field(USER_FIELD, model.getTaskSettings().user()); + } + + builder.field(STREAM_FIELD, stream); + if (stream) { + builder.startObject(STREAM_OPTIONS_FIELD); + builder.field(INCLUDE_USAGE_FIELD, true); + builder.endObject(); + } + builder.endObject(); + + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java new file mode 100644 index 
0000000000000..a4a8123935c3e
--- /dev/null
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.queries;
+
+import org.elasticsearch.action.ResolvedIndices;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.InferenceFieldMetadata;
+import org.elasticsearch.features.NodeFeature;
+import org.elasticsearch.index.mapper.IndexFieldMapper;
+import org.elasticsearch.index.query.BoolQueryBuilder;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryRewriteContext;
+import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.index.query.TermsQueryBuilder;
+import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+public class SemanticMatchQueryRewriteInterceptor implements QueryRewriteInterceptor {
+
+    public static final NodeFeature SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED = new NodeFeature(
+        "search.semantic_match_query_rewrite_interception_supported"
+    );
+
+    public SemanticMatchQueryRewriteInterceptor() {}
+
+    @Override
+    public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) {
+        assert (queryBuilder instanceof MatchQueryBuilder);
+        MatchQueryBuilder matchQueryBuilder = (MatchQueryBuilder) queryBuilder;
+        QueryBuilder rewritten = queryBuilder;
+        ResolvedIndices resolvedIndices = context.getResolvedIndices();
+        if (resolvedIndices != null) {
+            Collection<IndexMetadata> indexMetadataCollection = resolvedIndices.getConcreteLocalIndicesMetadata().values();
+            List<String> inferenceIndices = new ArrayList<>();
+            List<String> nonInferenceIndices = new ArrayList<>();
+            for (IndexMetadata indexMetadata : indexMetadataCollection) {
+                String indexName = indexMetadata.getIndex().getName();
+                InferenceFieldMetadata inferenceFieldMetadata = indexMetadata.getInferenceFields().get(matchQueryBuilder.fieldName());
+                if (inferenceFieldMetadata != null) {
+                    inferenceIndices.add(indexName);
+                } else {
+                    nonInferenceIndices.add(indexName);
+                }
+            }
+
+            if (inferenceIndices.isEmpty()) {
+                return rewritten;
+            } else if (nonInferenceIndices.isEmpty() == false) {
+                BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
+                for (String inferenceIndexName : inferenceIndices) {
+                    // Add a separate clause for each semantic query, because they may be using different inference endpoints
+                    // TODO - consolidate this to a single clause once the semantic query supports multiple inference endpoints
+                    boolQueryBuilder.should(
+                        createSemanticSubQuery(inferenceIndexName, matchQueryBuilder.fieldName(), (String) matchQueryBuilder.value())
+                    );
+                }
+                boolQueryBuilder.should(createMatchSubQuery(nonInferenceIndices, matchQueryBuilder));
+                rewritten = boolQueryBuilder;
+            } else {
+                rewritten = new SemanticQueryBuilder(matchQueryBuilder.fieldName(), (String) matchQueryBuilder.value(), false);
+            }
+        }
+
+        return rewritten;
+    }
+
+    @Override
+    public String getQueryName() {
+        return MatchQueryBuilder.NAME;
+    }
+
+    private QueryBuilder createSemanticSubQuery(String indexName, String fieldName, String value) {
+        BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
+        boolQueryBuilder.must(new SemanticQueryBuilder(fieldName, value, true));
+        boolQueryBuilder.filter(new TermQueryBuilder(IndexFieldMapper.NAME, indexName));
+        return boolQueryBuilder;
+    }
+
+    private QueryBuilder createMatchSubQuery(List<String> indices, MatchQueryBuilder matchQueryBuilder) {
+        BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
+        boolQueryBuilder.must(matchQueryBuilder);
+        boolQueryBuilder.filter(new TermsQueryBuilder(IndexFieldMapper.NAME, indices));
+        return boolQueryBuilder;
+    }
+}
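To illustrate the mixed-index branch above: for a match query on field body across index-a (semantic_text-backed) and index-b (plain text), the interceptor produces a query shaped like this hand-built equivalent (index and field names invented for illustration):

    // One should-clause per inference index, plus one covering the rest,
    // each pinned to its indices via the _index field.
    BoolQueryBuilder rewritten = new BoolQueryBuilder()
        .should(
            new BoolQueryBuilder().must(new SemanticQueryBuilder("body", "query text", true))
                .filter(new TermQueryBuilder(IndexFieldMapper.NAME, "index-a"))
        )
        .should(
            new BoolQueryBuilder().must(new MatchQueryBuilder("body", "query text"))
                .filter(new TermsQueryBuilder(IndexFieldMapper.NAME, List.of("index-b")))
        );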
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java
index 501b6e6c2bfe2..30094ff7dbdfc 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java
@@ -45,6 +45,7 @@
 import java.util.Objects;
 
 import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
 import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
 import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
 
@@ -53,16 +54,18 @@ public class SemanticQueryBuilder extends AbstractQueryBuilder<SemanticQueryBuilder> {
+    private static final ParseField LENIENT_FIELD = new ParseField("lenient");
 
     private static final ConstructingObjectParser<SemanticQueryBuilder, Void> PARSER = new ConstructingObjectParser<>(
         NAME,
         false,
-        args -> new SemanticQueryBuilder((String) args[0], (String) args[1])
+        args -> new SemanticQueryBuilder((String) args[0], (String) args[1], (Boolean) args[2])
     );
 
     static {
         PARSER.declareString(constructorArg(), FIELD_FIELD);
         PARSER.declareString(constructorArg(), QUERY_FIELD);
+        PARSER.declareBoolean(optionalConstructorArg(), LENIENT_FIELD);
         declareStandardFields(PARSER);
     }
 
@@ -71,8 +74,13 @@ public class SemanticQueryBuilder extends AbstractQueryBuilder<SemanticQueryBuilder> {
     private final SetOnce<InferenceServiceResults> inferenceResultsSupplier;
     private final InferenceResults inferenceResults;
     private final boolean noInferenceResults;
+    private final Boolean lenient;
 
     public SemanticQueryBuilder(String fieldName, String query) {
+        this(fieldName, query, null);
+    }
+
+    public SemanticQueryBuilder(String fieldName, String query, Boolean lenient) {
         if (fieldName == null) {
             throw new IllegalArgumentException("[" + NAME + "] requires a " + FIELD_FIELD.getPreferredName() + " value");
         }
@@ -84,6 +92,7 @@ public SemanticQueryBuilder(String fieldName, String query) {
         this.inferenceResults = null;
         this.inferenceResultsSupplier = null;
         this.noInferenceResults = false;
+        this.lenient = lenient;
     }
 
     public SemanticQueryBuilder(StreamInput in) throws IOException {
@@ -93,6 +102,11 @@
         this.inferenceResults = in.readOptionalNamedWriteable(InferenceResults.class);
         this.noInferenceResults = in.readBoolean();
         this.inferenceResultsSupplier = null;
+        if (in.getTransportVersion().onOrAfter(TransportVersions.SEMANTIC_QUERY_LENIENT)) {
+            this.lenient = in.readOptionalBoolean();
+        } else {
+            this.lenient = null;
+        }
     }
 
     @Override
@@ -104,6 +118,9 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeString(query);
         out.writeOptionalNamedWriteable(inferenceResults);
         out.writeBoolean(noInferenceResults);
+        if (out.getTransportVersion().onOrAfter(TransportVersions.SEMANTIC_QUERY_LENIENT)) { +
out.writeOptionalBoolean(lenient); + } } private SemanticQueryBuilder( @@ -119,6 +136,7 @@ private SemanticQueryBuilder( this.inferenceResultsSupplier = inferenceResultsSupplier; this.inferenceResults = inferenceResults; this.noInferenceResults = noInferenceResults; + this.lenient = other.lenient; } @Override @@ -140,6 +158,9 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.startObject(NAME); builder.field(FIELD_FIELD.getPreferredName(), fieldName); builder.field(QUERY_FIELD.getPreferredName(), query); + if (lenient != null) { + builder.field(LENIENT_FIELD.getPreferredName(), lenient); + } boostAndQueryNameToXContent(builder); builder.endObject(); } @@ -167,6 +188,8 @@ private QueryBuilder doRewriteBuildSemanticQuery(SearchExecutionContext searchEx } return semanticTextFieldType.semanticQuery(inferenceResults, searchExecutionContext.requestSize(), boost(), queryName()); + } else if (lenient != null && lenient) { + return new MatchNoneQueryBuilder(); } else { throw new IllegalArgumentException( "Field [" + fieldName + "] of type [" + fieldType.typeName() + "] does not support " + NAME + " queries" diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/BaseInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/BaseInferenceAction.java index e72e68052f648..d911158e82296 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/BaseInferenceAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/BaseInferenceAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestChannel; @@ -21,27 +22,32 @@ import static org.elasticsearch.xpack.inference.rest.Paths.TASK_TYPE_OR_INFERENCE_ID; abstract class BaseInferenceAction extends BaseRestHandler { - @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - String inferenceEntityId; - TaskType taskType; + static Params parseParams(RestRequest restRequest) { if (restRequest.hasParam(INFERENCE_ID)) { - inferenceEntityId = restRequest.param(INFERENCE_ID); - taskType = TaskType.fromStringOrStatusException(restRequest.param(TASK_TYPE_OR_INFERENCE_ID)); + var inferenceEntityId = restRequest.param(INFERENCE_ID); + var taskType = TaskType.fromStringOrStatusException(restRequest.param(TASK_TYPE_OR_INFERENCE_ID)); + return new Params(inferenceEntityId, taskType); } else { - inferenceEntityId = restRequest.param(TASK_TYPE_OR_INFERENCE_ID); - taskType = TaskType.ANY; + return new Params(restRequest.param(TASK_TYPE_OR_INFERENCE_ID), TaskType.ANY); } + } + + record Params(String inferenceEntityId, TaskType taskType) {} + + static TimeValue parseTimeout(RestRequest restRequest) { + return restRequest.paramAsTime(InferenceAction.Request.TIMEOUT.getPreferredName(), InferenceAction.Request.DEFAULT_TIMEOUT); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + var params = parseParams(restRequest); InferenceAction.Request.Builder requestBuilder; try (var parser = restRequest.contentParser()) { - requestBuilder = InferenceAction.Request.parseRequest(inferenceEntityId, taskType, parser); + 
requestBuilder = InferenceAction.Request.parseRequest(params.inferenceEntityId(), params.taskType(), parser); } - var inferTimeout = restRequest.paramAsTime( - InferenceAction.Request.TIMEOUT.getPreferredName(), - InferenceAction.Request.DEFAULT_TIMEOUT - ); + var inferTimeout = parseTimeout(restRequest); requestBuilder.setInferenceTimeout(inferTimeout); var request = prepareInferenceRequest(requestBuilder); return channel -> client.execute(InferenceAction.INSTANCE, request, listener(channel)); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java index 55d6443b43c03..c46f211bb26af 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/Paths.java @@ -30,6 +30,12 @@ public final class Paths { + "}/{" + INFERENCE_ID + "}/_stream"; + static final String UNIFIED_INFERENCE_ID_PATH = "_inference/{" + TASK_TYPE_OR_INFERENCE_ID + "}/_unified"; + static final String UNIFIED_TASK_TYPE_INFERENCE_ID_PATH = "_inference/{" + + TASK_TYPE_OR_INFERENCE_ID + + "}/{" + + INFERENCE_ID + + "}/_unified"; private Paths() { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestUnifiedCompletionInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestUnifiedCompletionInferenceAction.java new file mode 100644 index 0000000000000..5c71b560a6b9d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestUnifiedCompletionInferenceAction.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.rest; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.xpack.core.inference.action.UnifiedCompletionAction; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.inference.rest.Paths.UNIFIED_INFERENCE_ID_PATH; +import static org.elasticsearch.xpack.inference.rest.Paths.UNIFIED_TASK_TYPE_INFERENCE_ID_PATH; + +@ServerlessScope(Scope.PUBLIC) +public class RestUnifiedCompletionInferenceAction extends BaseRestHandler { + @Override + public String getName() { + return "unified_inference_action"; + } + + @Override + public List routes() { + return List.of(new Route(POST, UNIFIED_INFERENCE_ID_PATH), new Route(POST, UNIFIED_TASK_TYPE_INFERENCE_ID_PATH)); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + var params = BaseInferenceAction.parseParams(restRequest); + + var inferTimeout = BaseInferenceAction.parseTimeout(restRequest); + + UnifiedCompletionAction.Request request; + try (var parser = restRequest.contentParser()) { + request = UnifiedCompletionAction.Request.parseRequest(params.inferenceEntityId(), params.taskType(), inferTimeout, parser); + } + + return channel -> client.execute(UnifiedCompletionAction.INSTANCE, request, new ServerSentEventsRestActionListener(channel)); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java index f9890c62a749e..ce6ac6747eba8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.inference.services; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.InferenceService; @@ -17,11 +19,15 @@ import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; import org.elasticsearch.xpack.inference.external.http.sender.QueryAndDocsInputs; import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import java.io.IOException; import java.util.EnumSet; @@ -61,11 +67,31 @@ public void infer( ActionListener listener ) { init(); - if (query != null) { - 
doInfer(model, new QueryAndDocsInputs(query, input, stream), taskSettings, inputType, timeout, listener); - } else { - doInfer(model, new DocumentsOnlyInput(input, stream), taskSettings, inputType, timeout, listener); - } + var inferenceInput = createInput(model, input, query, stream); + doInfer(model, inferenceInput, taskSettings, inputType, timeout, listener); + } + + private static InferenceInputs createInput(Model model, List input, @Nullable String query, boolean stream) { + return switch (model.getTaskType()) { + case COMPLETION -> new ChatCompletionInput(input, stream); + case RERANK -> new QueryAndDocsInputs(query, input, stream); + case TEXT_EMBEDDING, SPARSE_EMBEDDING -> new DocumentsOnlyInput(input, stream); + default -> throw new ElasticsearchStatusException( + Strings.format("Invalid task type received when determining input type: [%s]", model.getTaskType().toString()), + RestStatus.BAD_REQUEST + ); + }; + } + + @Override + public void unifiedCompletionInfer( + Model model, + UnifiedCompletionRequest request, + TimeValue timeout, + ActionListener listener + ) { + init(); + doUnifiedCompletionInfer(model, new UnifiedChatInput(request, true), timeout, listener); } public void chunkedInfer( @@ -104,6 +130,13 @@ protected abstract void doInfer( ActionListener listener ); + protected abstract void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ); + protected abstract void doChunkedInfer( Model model, DocumentsOnlyInput inputs, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java index ec4b8d9bb4d3d..7d05bac363fb1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java @@ -776,5 +776,9 @@ public static T nonNullOrDefault(@Nullable T requestValue, @Nullable T origi return requestValue == null ? 
originalSettingsValue : requestValue; } + public static void throwUnsupportedUnifiedCompletionOperation(String serviceName) { + throw new UnsupportedOperationException(Strings.format("The %s service does not support unified completion", serviceName)); + } + private ServiceUtils() {} } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java index 2637d9755bd55..2547b9f79ac73 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java @@ -37,6 +37,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.external.request.alibabacloudsearch.AlibabaCloudSearchUtils; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; @@ -57,14 +58,13 @@ import java.util.Map; import java.util.stream.Stream; -import static org.elasticsearch.inference.TaskType.SPARSE_EMBEDDING; -import static org.elasticsearch.inference.TaskType.TEXT_EMBEDDING; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMap; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchServiceFields.EMBEDDING_MAX_BATCH_SIZE; import static org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchServiceSettings.HOST; import static org.elasticsearch.xpack.inference.services.alibabacloudsearch.AlibabaCloudSearchServiceSettings.HTTP_SCHEMA_NAME; @@ -261,6 +261,16 @@ public AlibabaCloudSearchModel parsePersistedConfig(String inferenceEntityId, Ta ); } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override public void doInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java index 48b3c3df03e11..d224e50bb650d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java @@ -40,6 +40,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -64,6 +65,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MODEL_FIELD; import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.PROVIDER_FIELD; import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.REGION_FIELD; @@ -89,6 +91,16 @@ public AmazonBedrockService( this.amazonBedrockSender = amazonBedrockFactory.createSender(); } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override protected void doInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java index b3d503de8e3eb..f1840af18779f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -52,6 +53,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; public class AnthropicService extends SenderService { public static final String NAME = "anthropic"; @@ -192,6 +194,16 @@ public EnumSet supportedTaskTypes() { return supportedTaskTypes; } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput 
inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override public void doInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java index bba331fc0b5df..f8ea11e4b15a5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java @@ -38,6 +38,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -63,6 +64,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.ENDPOINT_TYPE_FIELD; import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.PROVIDER_FIELD; import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.TARGET_FIELD; @@ -81,6 +83,16 @@ public AzureAiStudioService(HttpRequestSender.Factory factory, ServiceComponents super(factory, serviceComponents); } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override protected void doInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java index 16c94dfa9ad94..a38c265d2613c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java @@ -36,6 +36,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -58,6 +59,7 @@ import static 
org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.API_VERSION; import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.DEPLOYMENT_ID; import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.RESOURCE_NAME; @@ -233,6 +235,16 @@ public EnumSet supportedTaskTypes() { return supportedTaskTypes; } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override protected void doInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java index b3d8b3b6efce3..ccb8d79dacd6c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -58,6 +59,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.cohere.CohereServiceFields.EMBEDDING_MAX_BATCH_SIZE; public class CohereService extends SenderService { @@ -232,6 +234,16 @@ public EnumSet supportedTaskTypes() { return supportedTaskTypes; } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override public void doInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java index 8acef40840636..f107d64f93e4e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java @@ -37,6 +37,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -55,6 +56,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; public class ElasticInferenceService extends SenderService { @@ -73,6 +75,16 @@ public ElasticInferenceService( this.elasticInferenceServiceComponents = eisComponents; } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override protected void doInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 2d60e7343f762..5856e08c8dc9b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -31,6 +31,7 @@ import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskSettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; @@ -77,6 +78,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalServiceSettings.MODEL_ID; import static org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS; import static org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalServiceSettings.NUM_THREADS; @@ -578,6 +580,16 @@ private static CustomElandEmbeddingModel updateModelWithEmbeddingDetails(CustomE ); } + @Override + public void unifiedCompletionInfer( + Model model, + 
UnifiedCompletionRequest request, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override public void infer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java index 57a8a66a3f3a6..b681722a82136 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.GoogleAiStudioEmbeddingsRequestManager; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -64,6 +65,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.googleaistudio.GoogleAiStudioServiceFields.EMBEDDING_MAX_BATCH_SIZE; public class GoogleAiStudioService extends SenderService { @@ -282,9 +284,8 @@ protected void doInfer( ) { if (model instanceof GoogleAiStudioCompletionModel completionModel) { var requestManager = new GoogleAiStudioCompletionRequestManager(completionModel, getServiceComponents().threadPool()); - var docsOnly = DocumentsOnlyInput.of(inputs); var failedToSendRequestErrorMessage = constructFailedToSendRequestMessage( - completionModel.uri(docsOnly.stream()), + completionModel.uri(inputs.stream()), "Google AI Studio completion" ); var action = new SingleInputSenderExecutableAction( @@ -308,6 +309,16 @@ protected void doInfer( } } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override protected void doChunkedInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java index 857d475499aae..87a2d98dca92c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java @@ -35,6 +35,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import 
org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -57,6 +58,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields.EMBEDDING_MAX_BATCH_SIZE; import static org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields.LOCATION; import static org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields.PROJECT_ID; @@ -206,6 +208,16 @@ protected void doInfer( action.execute(inputs, timeout, listener); } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override protected void doChunkedInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java index 51cca72f26054..b74ec01cd76e7 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java @@ -18,6 +18,7 @@ import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; +import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.SettingsConfiguration; @@ -31,6 +32,7 @@ import org.elasticsearch.xpack.inference.external.action.huggingface.HuggingFaceActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.ServiceUtils; @@ -47,6 +49,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceFields.URL; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; public class HuggingFaceService extends HuggingFaceBaseService { public static final String NAME = "hugging_face"; @@ -139,6 +142,16 @@ protected void doChunkedInfer( } } + @Override + protected void doUnifiedCompletionInfer( + Model model, + 
UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override public InferenceServiceConfiguration getConfiguration() { return Configuration.get(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java index 75920efa251f2..5b038781b96af 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java @@ -36,6 +36,7 @@ import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceComponents; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceBaseService; @@ -49,6 +50,7 @@ import java.util.Map; import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserServiceSettings.URL; public class HuggingFaceElserService extends HuggingFaceBaseService { @@ -81,6 +83,16 @@ protected HuggingFaceModel createModel( }; } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override protected void doChunkedInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java index 981a3e95808ef..cc66d5fd7ee74 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java @@ -37,6 +37,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -57,6 +58,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static 
org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserServiceSettings.URL; import static org.elasticsearch.xpack.inference.services.ibmwatsonx.IbmWatsonxServiceFields.API_VERSION; import static org.elasticsearch.xpack.inference.services.ibmwatsonx.IbmWatsonxServiceFields.EMBEDDING_MAX_BATCH_SIZE; @@ -276,6 +278,16 @@ protected void doInfer( action.execute(input, timeout, listener); } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override protected void doChunkedInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java index fe0edb851902b..881e7d36f2a21 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java @@ -36,6 +36,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -58,6 +59,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwUnsupportedUnifiedCompletionOperation; import static org.elasticsearch.xpack.inference.services.mistral.MistralConstants.MODEL_FIELD; public class MistralService extends SenderService { @@ -88,6 +90,16 @@ protected void doInfer( } } + @Override + protected void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + throwUnsupportedUnifiedCompletionOperation(NAME); + } + @Override protected void doChunkedInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index 20ff1c617d21f..7b51b068708ca 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -32,10 +32,13 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.external.action.SenderExecutableAction; import 
org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.OpenAiUnifiedCompletionRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.SenderService; import org.elasticsearch.xpack.inference.services.ServiceComponents; @@ -53,6 +56,8 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; +import static org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionCreator.COMPLETION_ERROR_PREFIX; import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; import static org.elasticsearch.xpack.inference.services.ServiceFields.URL; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; @@ -257,6 +262,28 @@ public void doInfer( action.execute(inputs, timeout, listener); } + @Override + public void doUnifiedCompletionInfer( + Model model, + UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) { + if (model instanceof OpenAiChatCompletionModel == false) { + listener.onFailure(createInvalidModelException(model)); + return; + } + + OpenAiChatCompletionModel openAiModel = (OpenAiChatCompletionModel) model; + + var overriddenModel = OpenAiChatCompletionModel.of(openAiModel, inputs.getRequest()); + var requestCreator = OpenAiUnifiedCompletionRequestManager.of(overriddenModel, getServiceComponents().threadPool()); + var errorMessage = constructFailedToSendRequestMessage(overriddenModel.getServiceSettings().uri(), COMPLETION_ERROR_PREFIX); + var action = new SenderExecutableAction(getSender(), requestCreator, errorMessage); + + action.execute(inputs, timeout, listener); + } + @Override protected void doChunkedInfer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java index e721cd2955cf3..7d79d64b3a771 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModel.java @@ -13,6 +13,7 @@ import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; @@ -24,6 +25,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.USER; @@ -38,6 +40,26 @@ public static 
OpenAiChatCompletionModel of(OpenAiChatCompletionModel model, Map<String, Object> requestTaskSettings) { return new OpenAiChatCompletionModel(model, OpenAiChatCompletionTaskSettings.of(model.getTaskSettings(), requestTaskSettings)); } + public static OpenAiChatCompletionModel of(OpenAiChatCompletionModel model, UnifiedCompletionRequest request) { + var originalModelServiceSettings = model.getServiceSettings(); + var overriddenServiceSettings = new OpenAiChatCompletionServiceSettings( + Objects.requireNonNullElse(request.model(), originalModelServiceSettings.modelId()), + originalModelServiceSettings.uri(), + originalModelServiceSettings.organizationId(), + originalModelServiceSettings.maxInputTokens(), + originalModelServiceSettings.rateLimitSettings() + ); + + return new OpenAiChatCompletionModel( + model.getInferenceEntityId(), + model.getTaskType(), + model.getConfigurations().getService(), + overriddenServiceSettings, + model.getTaskSettings(), + model.getSecretSettings() + ); + } + public OpenAiChatCompletionModel( String inferenceEntityId, TaskType taskType, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettings.java index 8029d8579baba..7ef7f85d71a6a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettings.java @@ -48,5 +48,4 @@ public static OpenAiChatCompletionRequestTaskSettings fromMap(Map<String, Object> map) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/telemetry/ApmInferenceStats.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/telemetry/ApmInferenceStats.java deleted file mode 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/telemetry/ApmInferenceStats.java +++ /dev/null -public class ApmInferenceStats implements InferenceStats { - private final LongCounter inferenceAPMRequestCounter; - public ApmInferenceStats(LongCounter inferenceAPMRequestCounter) { - this.inferenceAPMRequestCounter = inferenceAPMRequestCounter; - } - @Override - public void incrementRequestCount(Model model) { - var service = model.getConfigurations().getService(); - var taskType = model.getTaskType(); - var modelId = model.getServiceSettings().modelId(); - var attributes = new HashMap<String, Object>(5); - attributes.put("service", service); - attributes.put("task_type", taskType.toString()); - if (modelId != null) { - attributes.put("model_id", modelId); - } - - inferenceAPMRequestCounter.incrementBy(1, attributes); - } - - public static ApmInferenceStats create(MeterRegistry meterRegistry) { - return new ApmInferenceStats( - meterRegistry.registerLongCounter( - "es.inference.requests.count.total", - "Inference API request counts for a particular service, task type, model ID", - "operations" - ) - ); - } -} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/telemetry/InferenceStats.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/telemetry/InferenceStats.java index d080e818e45fc..bb0de7ba47d51 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/telemetry/InferenceStats.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/telemetry/InferenceStats.java @@ -7,15 +7,89 @@ package org.elasticsearch.xpack.inference.telemetry; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.UnparsedModel; +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.MeterRegistry; -public interface InferenceStats { +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; - /** - * Increment the counter for a particular value in a thread safe manner.
- * @param model the model to increment request count for - */ - void incrementRequestCount(Model model); +import static java.util.Map.entry; +import static java.util.stream.Stream.concat; - InferenceStats NOOP = model -> {}; +public record InferenceStats(LongCounter requestCount, LongHistogram inferenceDuration) { + + public InferenceStats { + Objects.requireNonNull(requestCount); + Objects.requireNonNull(inferenceDuration); + } + + public static InferenceStats create(MeterRegistry meterRegistry) { + return new InferenceStats( + meterRegistry.registerLongCounter( + "es.inference.requests.count.total", + "Inference API request counts for a particular service, task type, model ID", + "operations" + ), + meterRegistry.registerLongHistogram( + "es.inference.requests.time", + "Inference API request counts for a particular service, task type, model ID", + "ms" + ) + ); + } + + public static Map<String, Object> modelAttributes(Model model) { + return toMap(modelAttributeEntries(model)); + } + + private static Stream<Map.Entry<String, Object>> modelAttributeEntries(Model model) { + var stream = Stream.<Map.Entry<String, Object>>builder() + .add(entry("service", model.getConfigurations().getService())) + .add(entry("task_type", model.getTaskType().toString())); + if (model.getServiceSettings().modelId() != null) { + stream.add(entry("model_id", model.getServiceSettings().modelId())); + } + return stream.build(); + } + + private static Map<String, Object> toMap(Stream<Map.Entry<String, Object>> stream) { + return stream.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + public static Map<String, Object> responseAttributes(Model model, @Nullable Throwable t) { + return toMap(concat(modelAttributeEntries(model), errorAttributes(t))); + } + + public static Map<String, Object> responseAttributes(UnparsedModel model, @Nullable Throwable t) { + var unknownModelAttributes = Stream.<Map.Entry<String, Object>>builder() + .add(entry("service", model.service())) + .add(entry("task_type", model.taskType().toString())) + .build(); + + return toMap(concat(unknownModelAttributes, errorAttributes(t))); + } + + public static Map<String, Object> responseAttributes(@Nullable Throwable t) { + return toMap(errorAttributes(t)); + } + + private static Stream<Map.Entry<String, Object>> errorAttributes(@Nullable Throwable t) { + if (t == null) { + return Stream.of(entry("status_code", 200)); + } else if (t instanceof ElasticsearchStatusException ese) { + return Stream.<Map.Entry<String, Object>>builder() + .add(entry("status_code", ese.status().getStatus())) + .add(entry("error.type", String.valueOf(ese.status().getStatus()))) + .build(); + } else { + return Stream.of(entry("error.type", t.getClass().getSimpleName())); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/telemetry/InferenceTimer.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/telemetry/InferenceTimer.java new file mode 100644 index 0000000000000..d43f4954edb52 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/telemetry/InferenceTimer.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.telemetry; + +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.util.Objects; + +public record InferenceTimer(Instant startTime, Clock clock) { + + public InferenceTimer { + Objects.requireNonNull(startTime); + Objects.requireNonNull(clock); + } + + public static InferenceTimer start() { + return start(Clock.systemUTC()); + } + + public static InferenceTimer start(Clock clock) { + return new InferenceTimer(clock.instant(), clock); + } + + public long elapsedMillis() { + return Duration.between(startTime(), clock().instant()).toMillis(); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/TaskTypeTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/TaskTypeTests.java new file mode 100644 index 0000000000000..f6c058bdbb79f --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/TaskTypeTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +public class TaskTypeTests extends ESTestCase { + + public void testFromStringOrStatusException() { + var exception = expectThrows(ElasticsearchStatusException.class, () -> TaskType.fromStringOrStatusException(null)); + assertThat(exception.getMessage(), Matchers.is("Task type must not be null")); + + exception = expectThrows(ElasticsearchStatusException.class, () -> TaskType.fromStringOrStatusException("blah")); + assertThat(exception.getMessage(), Matchers.is("Unknown task_type [blah]")); + + assertThat(TaskType.fromStringOrStatusException("any"), Matchers.is(TaskType.ANY)); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java index 5abb9000f4d04..9395ae222e9ba 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java @@ -19,6 +19,7 @@ import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; import org.elasticsearch.xpack.inference.common.Truncator; @@ -160,9 +161,11 @@ public static Model getInvalidModel(String inferenceEntityId, String serviceName var mockConfigs = mock(ModelConfigurations.class); when(mockConfigs.getInferenceEntityId()).thenReturn(inferenceEntityId); when(mockConfigs.getService()).thenReturn(serviceName); + when(mockConfigs.getTaskType()).thenReturn(TaskType.TEXT_EMBEDDING); var mockModel = mock(Model.class); when(mockModel.getConfigurations()).thenReturn(mockConfigs); + when(mockModel.getTaskType()).thenReturn(TaskType.TEXT_EMBEDDING); return mockModel; } diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/BaseTransportInferenceActionTestCase.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/BaseTransportInferenceActionTestCase.java new file mode 100644 index 0000000000000..47f3a0e0b57aa --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/BaseTransportInferenceActionTestCase.java @@ -0,0 +1,364 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.action; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.inference.InferenceService; +import org.elasticsearch.inference.InferenceServiceRegistry; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnparsedModel; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.inference.action.BaseInferenceActionRequest; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.inference.action.task.StreamingTaskManager; +import org.elasticsearch.xpack.inference.registry.ModelRegistry; +import org.elasticsearch.xpack.inference.telemetry.InferenceStats; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Flow; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isA; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.assertArg; +import static org.mockito.ArgumentMatchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public abstract class BaseTransportInferenceActionTestCase<Request extends BaseInferenceActionRequest> extends ESTestCase { + private ModelRegistry modelRegistry; + private StreamingTaskManager streamingTaskManager; + private BaseTransportInferenceAction<Request> action; + + protected static final String serviceId = "serviceId"; + protected static final TaskType taskType = TaskType.COMPLETION; + protected static final String inferenceId = "inferenceEntityId"; + protected InferenceServiceRegistry serviceRegistry; + protected InferenceStats inferenceStats; + + @Before + public void setUp() throws Exception { + super.setUp(); + TransportService transportService = mock(); + ActionFilters actionFilters = mock(); + modelRegistry = mock(); + serviceRegistry = mock(); + inferenceStats = new InferenceStats(mock(), mock()); + streamingTaskManager = mock(); + action = createAction(transportService, actionFilters, modelRegistry, serviceRegistry,
inferenceStats, streamingTaskManager); + } + + protected abstract BaseTransportInferenceAction<Request> createAction( + TransportService transportService, + ActionFilters actionFilters, + ModelRegistry modelRegistry, + InferenceServiceRegistry serviceRegistry, + InferenceStats inferenceStats, + StreamingTaskManager streamingTaskManager + ); + + protected abstract Request createRequest(); + + public void testMetricsAfterModelRegistryError() { + var expectedException = new IllegalStateException("hello"); + var expectedError = expectedException.getClass().getSimpleName(); + + doAnswer(ans -> { + ActionListener<UnparsedModel> listener = ans.getArgument(1); + listener.onFailure(expectedException); + return null; + }).when(modelRegistry).getModelWithSecrets(any(), any()); + + var listener = doExecute(taskType); + verify(listener).onFailure(same(expectedException)); + + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), nullValue()); + assertThat(attributes.get("task_type"), nullValue()); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), nullValue()); + assertThat(attributes.get("error.type"), is(expectedError)); + })); + } + + protected ActionListener<InferenceAction.Response> doExecute(TaskType taskType) { + return doExecute(taskType, false); + } + + protected ActionListener<InferenceAction.Response> doExecute(TaskType taskType, boolean stream) { + Request request = createRequest(); + when(request.getInferenceEntityId()).thenReturn(inferenceId); + when(request.getTaskType()).thenReturn(taskType); + when(request.isStreaming()).thenReturn(stream); + ActionListener<InferenceAction.Response> listener = mock(); + action.doExecute(mock(), request, listener); + return listener; + } + + public void testMetricsAfterMissingService() { + mockModelRegistry(taskType); + + when(serviceRegistry.getService(any())).thenReturn(Optional.empty()); + + var listener = doExecute(taskType); + + verify(listener).onFailure(assertArg(e -> { + assertThat(e, isA(ElasticsearchStatusException.class)); + assertThat(e.getMessage(), is("Unknown service [" + serviceId + "] for model [" + inferenceId + "]. 
")); + assertThat(((ElasticsearchStatusException) e).status(), is(RestStatus.BAD_REQUEST)); + })); + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), is(taskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(RestStatus.BAD_REQUEST.getStatus())); + assertThat(attributes.get("error.type"), is(String.valueOf(RestStatus.BAD_REQUEST.getStatus()))); + })); + } + + protected void mockModelRegistry(TaskType expectedTaskType) { + var unparsedModel = new UnparsedModel(inferenceId, expectedTaskType, serviceId, Map.of(), Map.of()); + doAnswer(ans -> { + ActionListener listener = ans.getArgument(1); + listener.onResponse(unparsedModel); + return null; + }).when(modelRegistry).getModelWithSecrets(any(), any()); + } + + public void testMetricsAfterUnknownTaskType() { + var modelTaskType = TaskType.RERANK; + var requestTaskType = TaskType.SPARSE_EMBEDDING; + mockModelRegistry(modelTaskType); + when(serviceRegistry.getService(any())).thenReturn(Optional.of(mock())); + + var listener = doExecute(requestTaskType); + + verify(listener).onFailure(assertArg(e -> { + assertThat(e, isA(ElasticsearchStatusException.class)); + assertThat( + e.getMessage(), + is( + "Incompatible task_type, the requested type [" + + requestTaskType + + "] does not match the model type [" + + modelTaskType + + "]" + ) + ); + assertThat(((ElasticsearchStatusException) e).status(), is(RestStatus.BAD_REQUEST)); + })); + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), is(modelTaskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(RestStatus.BAD_REQUEST.getStatus())); + assertThat(attributes.get("error.type"), is(String.valueOf(RestStatus.BAD_REQUEST.getStatus()))); + })); + } + + public void testMetricsAfterInferError() { + var expectedException = new IllegalStateException("hello"); + var expectedError = expectedException.getClass().getSimpleName(); + mockService(listener -> listener.onFailure(expectedException)); + + var listener = doExecute(taskType); + + verify(listener).onFailure(same(expectedException)); + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), is(taskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), nullValue()); + assertThat(attributes.get("error.type"), is(expectedError)); + })); + } + + public void testMetricsAfterStreamUnsupported() { + var expectedStatus = RestStatus.METHOD_NOT_ALLOWED; + var expectedError = String.valueOf(expectedStatus.getStatus()); + mockService(l -> {}); + + var listener = doExecute(taskType, true); + + verify(listener).onFailure(assertArg(e -> { + assertThat(e, isA(ElasticsearchStatusException.class)); + var ese = (ElasticsearchStatusException) e; + assertThat(ese.getMessage(), is("Streaming is not allowed for service [" + serviceId + "].")); + assertThat(ese.status(), is(expectedStatus)); + })); + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), 
is(taskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(expectedStatus.getStatus())); + assertThat(attributes.get("error.type"), is(expectedError)); + })); + } + + public void testMetricsAfterInferSuccess() { + mockService(listener -> listener.onResponse(mock())); + + var listener = doExecute(taskType); + + verify(listener).onResponse(any()); + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), is(taskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(200)); + assertThat(attributes.get("error.type"), nullValue()); + })); + } + + public void testMetricsAfterStreamInferSuccess() { + mockStreamResponse(Flow.Subscriber::onComplete); + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), is(taskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(200)); + assertThat(attributes.get("error.type"), nullValue()); + })); + } + + public void testMetricsAfterStreamInferFailure() { + var expectedException = new IllegalStateException("hello"); + var expectedError = expectedException.getClass().getSimpleName(); + mockStreamResponse(subscriber -> { + subscriber.subscribe(mock()); + subscriber.onError(expectedException); + }); + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), is(taskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), nullValue()); + assertThat(attributes.get("error.type"), is(expectedError)); + })); + } + + public void testMetricsAfterStreamCancel() { + var response = mockStreamResponse(s -> s.onSubscribe(mock())); + response.subscribe(new Flow.Subscriber<>() { + @Override + public void onSubscribe(Flow.Subscription subscription) { + subscription.cancel(); + } + + @Override + public void onNext(ChunkedToXContent item) { + + } + + @Override + public void onError(Throwable throwable) { + + } + + @Override + public void onComplete() { + + } + }); + + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), is(taskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(200)); + assertThat(attributes.get("error.type"), nullValue()); + })); + } + + protected Flow.Publisher mockStreamResponse(Consumer> action) { + mockService(true, Set.of(), listener -> { + Flow.Processor taskProcessor = mock(); + doAnswer(innerAns -> { + action.accept(innerAns.getArgument(0)); + return null; + }).when(taskProcessor).subscribe(any()); + when(streamingTaskManager.create(any(), any())).thenReturn(taskProcessor); + var inferenceServiceResults = mock(InferenceServiceResults.class); + when(inferenceServiceResults.publisher()).thenReturn(mock()); + listener.onResponse(inferenceServiceResults); + }); + + var listener = doExecute(taskType, true); + var captor = ArgumentCaptor.forClass(InferenceAction.Response.class); + verify(listener).onResponse(captor.capture()); 
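// Illustrative sketch (not from this change): the assertions in these tests suggest how the
// transport action presumably combines InferenceTimer with the InferenceStats duration
// histogram. The helper name and wiring below are assumptions; only InferenceTimer.start()/
// elapsedMillis(), InferenceStats.responseAttributes(...) and inferenceDuration().record(...)
// come from the code in this diff.
//
//     static ActionListener<InferenceServiceResults> instrumented(
//         InferenceStats stats,
//         Model model,
//         ActionListener<InferenceServiceResults> delegate
//     ) {
//         var timer = InferenceTimer.start(); // capture the start instant before dispatch
//         return ActionListener.wrap(result -> {
//             // success path: attributes carry status_code=200 and no error.type
//             stats.inferenceDuration().record(timer.elapsedMillis(), InferenceStats.responseAttributes(model, null));
//             delegate.onResponse(result);
//         }, e -> {
//             // failure path: attributes carry error.type (plus status_code for ElasticsearchStatusException)
//             stats.inferenceDuration().record(timer.elapsedMillis(), InferenceStats.responseAttributes(model, e));
//             delegate.onFailure(e);
//         });
//     }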
+ assertTrue(captor.getValue().isStreaming()); + assertNotNull(captor.getValue().publisher()); + return captor.getValue().publisher(); + } + + protected void mockService(Consumer> listenerAction) { + mockService(false, Set.of(), listenerAction); + } + + protected void mockService( + boolean stream, + Set supportedStreamingTasks, + Consumer> listenerAction + ) { + InferenceService service = mock(); + Model model = mockModel(); + when(service.parsePersistedConfigWithSecrets(any(), any(), any(), any())).thenReturn(model); + when(service.name()).thenReturn(serviceId); + + when(service.canStream(any())).thenReturn(stream); + when(service.supportedStreamingTasks()).thenReturn(supportedStreamingTasks); + doAnswer(ans -> { + listenerAction.accept(ans.getArgument(7)); + return null; + }).when(service).infer(any(), any(), any(), anyBoolean(), any(), any(), any(), any()); + doAnswer(ans -> { + listenerAction.accept(ans.getArgument(3)); + return null; + }).when(service).unifiedCompletionInfer(any(), any(), any(), any()); + mockModelAndServiceRegistry(service); + } + + protected Model mockModel() { + Model model = mock(); + ModelConfigurations modelConfigurations = mock(); + when(modelConfigurations.getService()).thenReturn(serviceId); + when(model.getConfigurations()).thenReturn(modelConfigurations); + when(model.getTaskType()).thenReturn(taskType); + when(model.getServiceSettings()).thenReturn(mock()); + return model; + } + + protected void mockModelAndServiceRegistry(InferenceService service) { + var unparsedModel = new UnparsedModel(inferenceId, taskType, serviceId, Map.of(), Map.of()); + doAnswer(ans -> { + ActionListener listener = ans.getArgument(1); + listener.onResponse(unparsedModel); + return null; + }).when(modelRegistry).getModelWithSecrets(any(), any()); + + when(serviceRegistry.getService(any())).thenReturn(Optional.of(service)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceActionTests.java new file mode 100644 index 0000000000000..e54175cb27009 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceActionTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.action; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.inference.InferenceServiceRegistry; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.inference.action.task.StreamingTaskManager; +import org.elasticsearch.xpack.inference.registry.ModelRegistry; +import org.elasticsearch.xpack.inference.telemetry.InferenceStats; + +import static org.mockito.Mockito.mock; + +public class TransportInferenceActionTests extends BaseTransportInferenceActionTestCase<InferenceAction.Request> { + + @Override + protected BaseTransportInferenceAction<InferenceAction.Request> createAction( + TransportService transportService, + ActionFilters actionFilters, + ModelRegistry modelRegistry, + InferenceServiceRegistry serviceRegistry, + InferenceStats inferenceStats, + StreamingTaskManager streamingTaskManager + ) { + return new TransportInferenceAction( + transportService, + actionFilters, + modelRegistry, + serviceRegistry, + inferenceStats, + streamingTaskManager + ); + } + + @Override + protected InferenceAction.Request createRequest() { + return mock(); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportUnifiedCompletionActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportUnifiedCompletionActionTests.java new file mode 100644 index 0000000000000..4c943599ce523 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportUnifiedCompletionActionTests.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.action; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.inference.InferenceServiceRegistry; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.inference.action.UnifiedCompletionAction; +import org.elasticsearch.xpack.inference.action.task.StreamingTaskManager; +import org.elasticsearch.xpack.inference.registry.ModelRegistry; +import org.elasticsearch.xpack.inference.telemetry.InferenceStats; + +import java.util.Optional; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isA; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.assertArg; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class TransportUnifiedCompletionActionTests extends BaseTransportInferenceActionTestCase<UnifiedCompletionAction.Request> { + + @Override + protected BaseTransportInferenceAction<UnifiedCompletionAction.Request> createAction( + TransportService transportService, + ActionFilters actionFilters, + ModelRegistry modelRegistry, + InferenceServiceRegistry serviceRegistry, + InferenceStats inferenceStats, + StreamingTaskManager streamingTaskManager + ) { + return new TransportUnifiedCompletionInferenceAction( + transportService, + actionFilters, + modelRegistry, + serviceRegistry, + inferenceStats, + streamingTaskManager + ); + } + + @Override + protected UnifiedCompletionAction.Request createRequest() { + return mock(); + } + + public void testThrows_IncompatibleTaskTypeException_WhenUsingATextEmbeddingInferenceEndpoint() { + var modelTaskType = TaskType.TEXT_EMBEDDING; + var requestTaskType = TaskType.TEXT_EMBEDDING; + mockModelRegistry(modelTaskType); + when(serviceRegistry.getService(any())).thenReturn(Optional.of(mock())); + + var listener = doExecute(requestTaskType); + + verify(listener).onFailure(assertArg(e -> { + assertThat(e, isA(ElasticsearchStatusException.class)); + assertThat( + e.getMessage(), + is("Incompatible task_type for unified API, the requested type [" + requestTaskType + "] must be one of [completion]") + ); + assertThat(((ElasticsearchStatusException) e).status(), is(RestStatus.BAD_REQUEST)); + })); + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), is(modelTaskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(RestStatus.BAD_REQUEST.getStatus())); + assertThat(attributes.get("error.type"), is(String.valueOf(RestStatus.BAD_REQUEST.getStatus()))); + })); + } + + public void testThrows_IncompatibleTaskTypeException_WhenUsingRequestIsAny_ModelIsTextEmbedding() { + var modelTaskType = TaskType.ANY; + var requestTaskType = TaskType.TEXT_EMBEDDING; + mockModelRegistry(modelTaskType); + when(serviceRegistry.getService(any())).thenReturn(Optional.of(mock())); + + var listener = doExecute(requestTaskType); + + verify(listener).onFailure(assertArg(e -> { + assertThat(e, isA(ElasticsearchStatusException.class)); + assertThat( + e.getMessage(), + is("Incompatible task_type for unified API, the requested type [" + requestTaskType + "] must be one 
of [completion]") + ); + assertThat(((ElasticsearchStatusException) e).status(), is(RestStatus.BAD_REQUEST)); + })); + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), is(modelTaskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(RestStatus.BAD_REQUEST.getStatus())); + assertThat(attributes.get("error.type"), is(String.valueOf(RestStatus.BAD_REQUEST.getStatus()))); + })); + } + + public void testMetricsAfterUnifiedInferSuccess_WithRequestTaskTypeAny() { + mockModelRegistry(TaskType.COMPLETION); + mockService(listener -> listener.onResponse(mock())); + + var listener = doExecute(TaskType.ANY); + + verify(listener).onResponse(any()); + verify(inferenceStats.inferenceDuration()).record(anyLong(), assertArg(attributes -> { + assertThat(attributes.get("service"), is(serviceId)); + assertThat(attributes.get("task_type"), is(taskType.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(200)); + assertThat(attributes.get("error.type"), nullValue()); + })); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java index 2416aeb62ff33..c68a629b999c5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java @@ -102,7 +102,7 @@ public void testFilterNoop() throws Exception { new BulkItemRequest[0] ); request.setInferenceFieldMap( - Map.of("foo", new InferenceFieldMetadata("foo", "bar", generateRandomStringArray(5, 10, false, false))) + Map.of("foo", new InferenceFieldMetadata("foo", "bar", "baz", generateRandomStringArray(5, 10, false, false))) ); filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain); awaitLatch(chainExecuted, 10, TimeUnit.SECONDS); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java index a82d2f474ca4a..dec7d15760aa6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; import org.elasticsearch.xpack.core.ml.search.WeightedToken; +import org.hamcrest.Matchers; import java.util.ArrayList; import java.util.List; @@ -31,16 +32,62 @@ public class EmbeddingRequestChunkerTests extends ESTestCase { - public void testEmptyInput() { + public void testEmptyInput_WordChunker() { var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); var batches = new EmbeddingRequestChunker(List.of(), 100, 100, 10, embeddingType).batchRequestsWithListeners(testListener()); 
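// Aside (not part of the patch): a minimal sketch of the two EmbeddingRequestChunker
// construction shapes the tests above exercise. The argument-role comments are inferred
// from how the tests call the constructors, not from the production signature, and
// "listener" stands in for the tests' testListener().
//
//     List<String> inputs = List.of("some text");
//     var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values());
//
//     // word-boundary chunking: max inputs per batch, words per chunk, word overlap
//     var wordBatches = new EmbeddingRequestChunker(inputs, 100, 100, 10, embeddingType)
//         .batchRequestsWithListeners(listener);
//
//     // sentence-boundary chunking: behaviour carried by the ChunkingSettings argument
//     var sentenceBatches = new EmbeddingRequestChunker(inputs, 10, embeddingType, new SentenceBoundaryChunkingSettings(250, 1))
//         .batchRequestsWithListeners(listener);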
assertThat(batches, empty()); } - public void testBlankInput() { + public void testEmptyInput_SentenceChunker() { + var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of(), 10, embeddingType, new SentenceBoundaryChunkingSettings(250, 1)) + .batchRequestsWithListeners(testListener()); + assertThat(batches, empty()); + } + + public void testWhitespaceInput_SentenceChunker() { + var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of(" "), 10, embeddingType, new SentenceBoundaryChunkingSettings(250, 1)) + .batchRequestsWithListeners(testListener()); + assertThat(batches, hasSize(1)); + assertThat(batches.get(0).batch().inputs(), hasSize(1)); + assertThat(batches.get(0).batch().inputs().get(0), Matchers.is(" ")); + } + + public void testBlankInput_WordChunker() { var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); var batches = new EmbeddingRequestChunker(List.of(""), 100, 100, 10, embeddingType).batchRequestsWithListeners(testListener()); assertThat(batches, hasSize(1)); + assertThat(batches.get(0).batch().inputs(), hasSize(1)); + assertThat(batches.get(0).batch().inputs().get(0), Matchers.is("")); + } + + public void testBlankInput_SentenceChunker() { + var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of(""), 10, embeddingType, new SentenceBoundaryChunkingSettings(250, 1)) + .batchRequestsWithListeners(testListener()); + assertThat(batches, hasSize(1)); + assertThat(batches.get(0).batch().inputs(), hasSize(1)); + assertThat(batches.get(0).batch().inputs().get(0), Matchers.is("")); + } + + public void testInputThatDoesNotChunk_WordChunker() { + var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of("ABBAABBA"), 100, 100, 10, embeddingType).batchRequestsWithListeners( + testListener() + ); + assertThat(batches, hasSize(1)); + assertThat(batches.get(0).batch().inputs(), hasSize(1)); + assertThat(batches.get(0).batch().inputs().get(0), Matchers.is("ABBAABBA")); + } + + public void testInputThatDoesNotChunk_SentenceChunker() { + var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of("ABBAABBA"), 10, embeddingType, new SentenceBoundaryChunkingSettings(250, 1)) + .batchRequestsWithListeners(testListener()); + assertThat(batches, hasSize(1)); + assertThat(batches.get(0).batch().inputs(), hasSize(1)); + assertThat(batches.get(0).batch().inputs().get(0), Matchers.is("ABBAABBA")); } public void testShortInputsAreSingleBatch() { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java index de943f7f57ab8..f81894ccd4bbb 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java @@ -43,6 +43,41 @@ private List textChunks( return chunkPositions.stream().map(offset -> input.substring(offset.start(), offset.end())).collect(Collectors.toList()); } + public void testEmptyString() { + var chunks = textChunks(new 
SentenceBoundaryChunker(), "", 100, randomBoolean()); + assertThat(chunks, hasSize(1)); + assertThat(chunks.get(0), Matchers.is("")); + } + + public void testBlankString() { + var chunks = textChunks(new SentenceBoundaryChunker(), " ", 100, randomBoolean()); + assertThat(chunks, hasSize(1)); + assertThat(chunks.get(0), Matchers.is(" ")); + } + + public void testSingleChar() { + var chunks = textChunks(new SentenceBoundaryChunker(), " b", 100, randomBoolean()); + assertThat(chunks, Matchers.contains(" b")); + + chunks = textChunks(new SentenceBoundaryChunker(), "b", 100, randomBoolean()); + assertThat(chunks, Matchers.contains("b")); + + chunks = textChunks(new SentenceBoundaryChunker(), ". ", 100, randomBoolean()); + assertThat(chunks, Matchers.contains(". ")); + + chunks = textChunks(new SentenceBoundaryChunker(), " , ", 100, randomBoolean()); + assertThat(chunks, Matchers.contains(" , ")); + + chunks = textChunks(new SentenceBoundaryChunker(), " ,", 100, randomBoolean()); + assertThat(chunks, Matchers.contains(" ,")); + } + + public void testSingleCharRepeated() { + var input = "a".repeat(32_000); + var chunks = textChunks(new SentenceBoundaryChunker(), input, 100, randomBoolean()); + assertThat(chunks, Matchers.contains(input)); + } + public void testChunkSplitLargeChunkSizes() { for (int maxWordsPerChunk : new int[] { 100, 200 }) { var chunker = new SentenceBoundaryChunker(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java index 2ef28f2cf2e77..b4fa5c9122258 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; import java.util.List; import java.util.Locale; @@ -71,10 +72,6 @@ public class WordBoundaryChunkerTests extends ESTestCase { * Use the chunk functions that return offsets where possible */ List textChunks(WordBoundaryChunker chunker, String input, int chunkSize, int overlap) { - if (input.isEmpty()) { - return List.of(""); - } - var chunkPositions = chunker.chunk(input, chunkSize, overlap); return chunkPositions.stream().map(p -> input.substring(p.start(), p.end())).collect(Collectors.toList()); } @@ -240,6 +237,35 @@ public void testWhitespace() { assertThat(chunks, contains(" ")); } + public void testBlankString() { + var chunks = textChunks(new WordBoundaryChunker(), " ", 100, 10); + assertThat(chunks, hasSize(1)); + assertThat(chunks.get(0), Matchers.is(" ")); + } + + public void testSingleChar() { + var chunks = textChunks(new WordBoundaryChunker(), " b", 100, 10); + assertThat(chunks, Matchers.contains(" b")); + + chunks = textChunks(new WordBoundaryChunker(), "b", 100, 10); + assertThat(chunks, Matchers.contains("b")); + + chunks = textChunks(new WordBoundaryChunker(), ". ", 100, 10); + assertThat(chunks, Matchers.contains(". 
")); + + chunks = textChunks(new WordBoundaryChunker(), " , ", 100, 10); + assertThat(chunks, Matchers.contains(" , ")); + + chunks = textChunks(new WordBoundaryChunker(), " ,", 100, 10); + assertThat(chunks, Matchers.contains(" ,")); + } + + public void testSingleCharRepeated() { + var input = "a".repeat(32_000); + var chunks = textChunks(new WordBoundaryChunker(), input, 100, 10); + assertThat(chunks, Matchers.contains(input)); + } + public void testPunctuation() { int chunkSize = 1; var chunks = textChunks(new WordBoundaryChunker(), "Comma, separated", chunkSize, 0); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/SingleInputSenderExecutableActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/SingleInputSenderExecutableActionTests.java index d4ab9b1f1e19a..9e7c58b0ca79e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/SingleInputSenderExecutableActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/SingleInputSenderExecutableActionTests.java @@ -61,25 +61,11 @@ public void testOneInputIsValid() { assertTrue("Test failed to call listener.", testRan.get()); } - public void testInvalidInputType() { - var badInput = mock(InferenceInputs.class); - var actualException = new AtomicReference(); - - executableAction.execute( - badInput, - mock(TimeValue.class), - ActionListener.wrap(shouldNotSucceed -> fail("Test failed."), actualException::set) - ); - - assertThat(actualException.get(), notNullValue()); - assertThat(actualException.get().getMessage(), is("Invalid inference input type")); - assertThat(actualException.get(), instanceOf(ElasticsearchStatusException.class)); - assertThat(((ElasticsearchStatusException) actualException.get()).status(), is(RestStatus.INTERNAL_SERVER_ERROR)); - } - public void testMoreThanOneInput() { var badInput = mock(DocumentsOnlyInput.class); - when(badInput.getInputs()).thenReturn(List.of("one", "two")); + var input = List.of("one", "two"); + when(badInput.getInputs()).thenReturn(input); + when(badInput.inputSize()).thenReturn(input.size()); var actualException = new AtomicReference(); executableAction.execute( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java index 87d3a82b4aae6..e7543aa6ba9e5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockMockRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; @@ -130,7 +131,7 @@ public void 
testCompletionRequestAction() throws IOException { ); var action = creator.create(model, Map.of()); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); assertThat(result.asMap(), is(buildExpectationCompletion(List.of("test input string")))); @@ -163,7 +164,7 @@ public void testChatCompletionRequestAction_HandlesException() throws IOExceptio ); var action = creator.create(model, Map.of()); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); assertThat(sender.sendCount(), is(1)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionCreatorTests.java index a3114300c5ddc..f0de37ceaaf98 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionCreatorTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.request.anthropic.AnthropicRequestUtils; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -49,6 +49,7 @@ import static org.mockito.Mockito.mock; public class AnthropicActionCreatorTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); private final MockWebServer webServer = new MockWebServer(); private ThreadPool threadPool; @@ -103,7 +104,7 @@ public void testCreate_ChatCompletionModel() throws IOException { var action = actionCreator.create(model, overriddenTaskSettings); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -168,7 +169,7 @@ public void testCreate_ChatCompletionModel_FailsFromInvalidResponseFormat() thro var action = actionCreator.create(model, overriddenTaskSettings); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = 
expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); assertThat( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicChatCompletionActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicChatCompletionActionTests.java index fca2e316af17f..2065a726b7589 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicChatCompletionActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicChatCompletionActionTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.xpack.inference.external.action.SingleInputSenderExecutableAction; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.sender.AnthropicCompletionRequestManager; -import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.Sender; @@ -113,7 +113,7 @@ public void testExecute_ReturnsSuccessfulResponse() throws IOException { var action = createAction(getUrl(webServer), "secret", "model", 1, sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -149,7 +149,7 @@ public void testExecute_ThrowsElasticsearchException() { var action = createAction(getUrl(webServer), "secret", "model", 1, sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -170,7 +170,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled var action = createAction(getUrl(webServer), "secret", "model", 1, sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -187,7 +187,7 @@ public void testExecute_ThrowsException() { var action = createAction(getUrl(webServer), "secret", "model", 1, sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -229,7 +229,7 @@ public void 
testExecute_ThrowsException_WhenInputIsGreaterThanOne() throws IOExc var action = createAction(getUrl(webServer), "secret", "model", 1, sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc", "def")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc", "def")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureaistudio/AzureAiStudioActionAndCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureaistudio/AzureAiStudioActionAndCreatorTests.java index 8792234102a94..210fab457de10 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureaistudio/AzureAiStudioActionAndCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureaistudio/AzureAiStudioActionAndCreatorTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.inference.common.TruncatorTests; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; @@ -160,7 +161,7 @@ public void testChatCompletionRequestAction() throws IOException { var action = creator.create(model, Map.of()); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureopenai/AzureOpenAiActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureopenai/AzureOpenAiActionCreatorTests.java index 45a2fb0954c79..7e1e3e55caed8 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureopenai/AzureOpenAiActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureopenai/AzureOpenAiActionCreatorTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.request.azureopenai.AzureOpenAiUtils; @@ -475,7 +476,7 @@ public void testInfer_AzureOpenAiCompletion_WithOverriddenUser() throws IOExcept var action = actionCreator.create(model, taskSettingsWithUserOverride); 
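// Aside (not part of the patch): every completion-style execute() call in this file and
// the neighbouring action test suites makes the same swap of input wrapper, shown here
// in isolation. The listener's InferenceServiceResults type parameter is an assumption,
// based on how these futures are consumed elsewhere in the suites:
//
//     PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>();
//
//     // before: completion requests reused the embeddings-style document wrapper
//     action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener);
//
//     // after: completion requests use the dedicated chat-completion input type
//     action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener);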
PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of(completionInput)), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of(completionInput)), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -531,7 +532,7 @@ public void testInfer_AzureOpenAiCompletionModel_WithoutUser() throws IOExceptio var action = actionCreator.create(model, requestTaskSettingsWithoutUser); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of(completionInput)), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of(completionInput)), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -589,7 +590,7 @@ public void testInfer_AzureOpenAiCompletionModel_FailsFromInvalidResponseFormat( var action = actionCreator.create(model, requestTaskSettingsWithoutUser); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of(completionInput)), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of(completionInput)), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); assertThat( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureopenai/AzureOpenAiCompletionActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureopenai/AzureOpenAiCompletionActionTests.java index 4c7683c882816..dca12dfda9c98 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureopenai/AzureOpenAiCompletionActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/azureopenai/AzureOpenAiCompletionActionTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.xpack.inference.external.action.SingleInputSenderExecutableAction; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.sender.AzureOpenAiCompletionRequestManager; -import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.request.azureopenai.AzureOpenAiUtils; @@ -111,7 +111,7 @@ public void testExecute_ReturnsSuccessfulResponse() throws IOException { var action = createAction("resource", "deployment", "apiversion", user, apiKey, sender, "id"); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of(completionInput)), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of(completionInput)), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -142,7 +142,7 @@ public void testExecute_ThrowsElasticsearchException() { var action = createAction("resource", "deployment", "apiVersion", "user", "apikey", sender, "id"); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new 
DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -163,7 +163,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled var action = createAction("resource", "deployment", "apiVersion", "user", "apikey", sender, "id"); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -177,7 +177,7 @@ public void testExecute_ThrowsException() { var action = createAction("resource", "deployment", "apiVersion", "user", "apikey", sender, "id"); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java index 9ec34e7d8e5c5..3a512de25a39c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereActionCreatorTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -197,7 +198,7 @@ public void testCreate_CohereCompletionModel_WithModelSpecified() throws IOExcep var action = actionCreator.create(model, Map.of()); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -257,7 +258,7 @@ public void testCreate_CohereCompletionModel_WithoutModelSpecified() throws IOEx var action = actionCreator.create(model, Map.of()); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereCompletionActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereCompletionActionTests.java index ba839e0d7c5e9..c5871adb34864 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereCompletionActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereCompletionActionTests.java @@ -26,8 +26,8 @@ import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.SingleInputSenderExecutableAction; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.CohereCompletionRequestManager; -import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.external.request.cohere.CohereUtils; @@ -120,7 +120,7 @@ public void testExecute_ReturnsSuccessfulResponse_WithModelSpecified() throws IO var action = createAction(getUrl(webServer), "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -181,7 +181,7 @@ public void testExecute_ReturnsSuccessfulResponse_WithoutModelSpecified() throws var action = createAction(getUrl(webServer), "secret", null, sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -214,7 +214,7 @@ public void testExecute_ThrowsElasticsearchException() { var action = createAction(getUrl(webServer), "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -235,7 +235,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled var action = createAction(getUrl(webServer), "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -256,7 +256,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled var action = createAction(null, "secret", "model", sender); 
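// Aside (not part of the patch): the "input is greater than one" cases in this file and
// the sibling completion suites exercise the guard in SingleInputSenderExecutableAction.
// A hypothetical sketch of that check, assuming the inputSize() accessor seen in the
// mocks above; the error message is illustrative, not quoted from the production class:
//
//     void executeGuarded(InferenceInputs inputs, TimeValue timeout, ActionListener<InferenceServiceResults> listener) {
//         if (inputs.inputSize() > 1) {
//             // completion endpoints accept exactly one input per request
//             listener.onFailure(new ElasticsearchStatusException("expected exactly one input", RestStatus.BAD_REQUEST));
//             return;
//         }
//         delegate.execute(inputs, timeout, listener); // otherwise hand off to the wrapped action
//     }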
PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -270,7 +270,7 @@ public void testExecute_ThrowsException() { var action = createAction(getUrl(webServer), "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -284,7 +284,7 @@ public void testExecute_ThrowsExceptionWithNullUrl() { var action = createAction(null, "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -334,7 +334,7 @@ public void testExecute_ThrowsException_WhenInputIsGreaterThanOne() throws IOExc var action = createAction(getUrl(webServer), "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc", "def")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc", "def")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/googleaistudio/GoogleAiStudioCompletionActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/googleaistudio/GoogleAiStudioCompletionActionTests.java index 72b5ffa45a0dd..ff17bbf66e02a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/googleaistudio/GoogleAiStudioCompletionActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/googleaistudio/GoogleAiStudioCompletionActionTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.SingleInputSenderExecutableAction; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.GoogleAiStudioCompletionRequestManager; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.Sender; @@ -128,7 +128,7 @@ public void testExecute_ReturnsSuccessfulResponse() throws IOException { var action = createAction(getUrl(webServer), "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new 
DocumentsOnlyInput(List.of("input")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("input")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -159,7 +159,7 @@ public void testExecute_ThrowsElasticsearchException() { var action = createAction(getUrl(webServer), "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -180,7 +180,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled var action = createAction(getUrl(webServer), "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -197,7 +197,7 @@ public void testExecute_ThrowsException() { var action = createAction(getUrl(webServer), "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -260,7 +260,7 @@ public void testExecute_ThrowsException_WhenInputIsGreaterThanOne() throws IOExc var action = createAction(getUrl(webServer), "secret", "model", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc", "def")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc", "def")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java index b6d7eb673b7f0..fe076eb721ea2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiActionCreatorTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -330,7 +331,7 @@ public void 
testCreate_OpenAiChatCompletionModel() throws IOException { var action = actionCreator.create(model, overriddenTaskSettings); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -345,11 +346,12 @@ public void testCreate_OpenAiChatCompletionModel() throws IOException { assertThat(request.getHeader(ORGANIZATION_HEADER), equalTo("org")); var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(4)); + assertThat(requestMap.size(), is(5)); assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); assertThat(requestMap.get("model"), is("model")); assertThat(requestMap.get("user"), is("overridden_user")); assertThat(requestMap.get("n"), is(1)); + assertThat(requestMap.get("stream"), is(false)); } } @@ -393,7 +395,7 @@ public void testCreate_OpenAiChatCompletionModel_WithoutUser() throws IOExceptio var action = actionCreator.create(model, overriddenTaskSettings); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -408,10 +410,11 @@ public void testCreate_OpenAiChatCompletionModel_WithoutUser() throws IOExceptio assertThat(request.getHeader(ORGANIZATION_HEADER), equalTo("org")); var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(3)); + assertThat(requestMap.size(), is(4)); assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); assertThat(requestMap.get("model"), is("model")); assertThat(requestMap.get("n"), is(1)); + assertThat(requestMap.get("stream"), is(false)); } } @@ -455,7 +458,7 @@ public void testCreate_OpenAiChatCompletionModel_WithoutOrganization() throws IO var action = actionCreator.create(model, overriddenTaskSettings); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -470,11 +473,12 @@ public void testCreate_OpenAiChatCompletionModel_WithoutOrganization() throws IO assertNull(request.getHeader(ORGANIZATION_HEADER)); var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(4)); + assertThat(requestMap.size(), is(5)); assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); assertThat(requestMap.get("model"), is("model")); assertThat(requestMap.get("user"), is("overridden_user")); assertThat(requestMap.get("n"), is(1)); + assertThat(requestMap.get("stream"), is(false)); } } @@ -523,7 +527,7 @@ public void testCreate_OpenAiChatCompletionModel_FailsFromInvalidResponseFormat( var action = actionCreator.create(model, overriddenTaskSettings); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new 
ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); assertThat( @@ -542,11 +546,12 @@ public void testCreate_OpenAiChatCompletionModel_FailsFromInvalidResponseFormat( assertNull(webServer.requests().get(0).getHeader(ORGANIZATION_HEADER)); var requestMap = entityAsMap(webServer.requests().get(0).getBody()); - assertThat(requestMap.size(), is(4)); + assertThat(requestMap.size(), is(5)); assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); assertThat(requestMap.get("model"), is("model")); assertThat(requestMap.get("user"), is("overridden_user")); assertThat(requestMap.get("n"), is(1)); + assertThat(requestMap.get("stream"), is(false)); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionActionTests.java index d84b2b5bb324a..ba74d2ab42c21 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionActionTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.SingleInputSenderExecutableAction; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; -import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; import org.elasticsearch.xpack.inference.external.http.sender.OpenAiCompletionRequestManager; @@ -119,7 +119,7 @@ public void testExecute_ReturnsSuccessfulResponse() throws IOException { var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var result = listener.actionGet(TIMEOUT); @@ -134,11 +134,12 @@ public void testExecute_ReturnsSuccessfulResponse() throws IOException { assertThat(request.getHeader(ORGANIZATION_HEADER), equalTo("org")); var requestMap = entityAsMap(request.getBody()); - assertThat(requestMap.size(), is(4)); + assertThat(requestMap.size(), is(5)); assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); assertThat(requestMap.get("model"), is("model")); assertThat(requestMap.get("user"), is("user")); assertThat(requestMap.get("n"), is(1)); + assertThat(requestMap.get("stream"), is(false)); } } @@ -159,7 +160,7 @@ public void testExecute_ThrowsElasticsearchException() { var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new 
ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -180,7 +181,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -201,7 +202,7 @@ public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled var action = createAction(null, "org", "secret", "model", "user", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -215,7 +216,7 @@ public void testExecute_ThrowsException() { var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -229,7 +230,7 @@ public void testExecute_ThrowsExceptionWithNullUrl() { var action = createAction(null, "org", "secret", "model", "user", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); @@ -273,7 +274,7 @@ public void testExecute_ThrowsException_WhenInputIsGreaterThanOne() throws IOExc var action = createAction(getUrl(webServer), "org", "secret", "model", "user", sender); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new DocumentsOnlyInput(List.of("abc", "def")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + action.execute(new ChatCompletionInput(List.of("abc", "def")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java index e68beaf4c1eb5..929aefeeef6b9 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java @@ -12,6 +12,7 
@@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; import org.elasticsearch.xpack.inference.external.http.sender.RequestManager; @@ -67,8 +68,15 @@ public void send( ActionListener listener ) { sendCounter++; - var docsInput = (DocumentsOnlyInput) inferenceInputs; - inputs.add(docsInput.getInputs()); + if (inferenceInputs instanceof DocumentsOnlyInput docsInput) { + inputs.add(docsInput.getInputs()); + } else if (inferenceInputs instanceof ChatCompletionInput chatCompletionInput) { + inputs.add(chatCompletionInput.getInputs()); + } else { + throw new IllegalArgumentException( + "Invalid inference inputs received in mock sender: " + inferenceInputs.getClass().getSimpleName() + ); + } if (results.isEmpty()) { listener.onFailure(new ElasticsearchException("No results found")); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java index 7fa8a09d5bf12..a8f37aedcece3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockChatCompletionRequestManager; import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockEmbeddingsRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.Sender; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; @@ -107,7 +108,7 @@ public void testCreateSender_SendsCompletionRequestAndReceivesResponse() throws PlainActionFuture listener = new PlainActionFuture<>(); var requestManager = new AmazonBedrockChatCompletionRequestManager(model, threadPool, new TimeValue(30, TimeUnit.SECONDS)); - sender.send(requestManager, new DocumentsOnlyInput(List.of("abc")), null, listener); + sender.send(requestManager, new ChatCompletionInput(List.of("abc")), null, listener); var result = listener.actionGet(TIMEOUT); assertThat(result.asMap(), is(buildExpectationCompletion(List.of("test response text")))); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputsTests.java new file mode 100644 index 0000000000000..f0da67a982374 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputsTests.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.inference.UnifiedCompletionRequest; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.util.List; + +public class InferenceInputsTests extends ESTestCase { + public void testCastToSucceeds() { + InferenceInputs inputs = new DocumentsOnlyInput(List.of(), false); + assertThat(inputs.castTo(DocumentsOnlyInput.class), Matchers.instanceOf(DocumentsOnlyInput.class)); + + var emptyRequest = new UnifiedCompletionRequest(List.of(), null, null, null, null, null, null, null); + assertThat(new UnifiedChatInput(emptyRequest, false).castTo(UnifiedChatInput.class), Matchers.instanceOf(UnifiedChatInput.class)); + assertThat( + new QueryAndDocsInputs("hello", List.of(), false).castTo(QueryAndDocsInputs.class), + Matchers.instanceOf(QueryAndDocsInputs.class) + ); + } + + public void testCastToFails() { + InferenceInputs inputs = new DocumentsOnlyInput(List.of(), false); + var exception = expectThrows(IllegalArgumentException.class, () -> inputs.castTo(QueryAndDocsInputs.class)); + assertThat( + exception.getMessage(), + Matchers.containsString( + Strings.format("Unable to convert inference inputs type: [%s] to [%s]", DocumentsOnlyInput.class, QueryAndDocsInputs.class) + ) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/UnifiedChatInputTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/UnifiedChatInputTests.java new file mode 100644 index 0000000000000..42e1b18168aec --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/UnifiedChatInputTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.inference.UnifiedCompletionRequest; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.util.List; + +public class UnifiedChatInputTests extends ESTestCase { + + public void testConvertsStringInputToMessages() { + var a = new UnifiedChatInput(List.of("hello", "awesome"), "a role", true); + + assertThat(a.inputSize(), Matchers.is(2)); + assertThat( + a.getRequest(), + Matchers.is( + UnifiedCompletionRequest.of( + List.of( + new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString("hello"), + "a role", + null, + null, + null + ), + new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString("awesome"), + "a role", + null, + null, + null + ) + ) + ) + ) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiUnifiedStreamingProcessorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiUnifiedStreamingProcessorTests.java new file mode 100644 index 0000000000000..0f127998f9c54 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/openai/OpenAiUnifiedStreamingProcessorTests.java @@ -0,0 +1,383 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.openai; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.results.StreamingUnifiedChatCompletionResults; + +import java.io.IOException; +import java.util.List; + +public class OpenAiUnifiedStreamingProcessorTests extends ESTestCase { + + public void testJsonLiteral() { + String json = """ + { + "id": "example_id", + "choices": [ + { + "delta": { + "content": "example_content", + "refusal": null, + "role": "assistant", + "tool_calls": [ + { + "index": 1, + "id": "tool_call_id", + "function": { + "arguments": "example_arguments", + "name": "example_function_name" + }, + "type": "function" + } + ] + }, + "finish_reason": "stop", + "index": 0 + } + ], + "model": "example_model", + "object": "chat.completion.chunk", + "usage": { + "completion_tokens": 50, + "prompt_tokens": 20, + "total_tokens": 70 + } + } + """; + // Parse the JSON + XContentParserConfiguration parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler( + LoggingDeprecationHandler.INSTANCE + ); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, json)) { + StreamingUnifiedChatCompletionResults.ChatCompletionChunk chunk = OpenAiUnifiedStreamingProcessor.ChatCompletionChunkParser + .parse(parser); + + // Assertions to verify the parsed object + assertEquals("example_id", chunk.getId()); + assertEquals("example_model", chunk.getModel()); + assertEquals("chat.completion.chunk", chunk.getObject()); + assertNotNull(chunk.getUsage()); + assertEquals(50, 
chunk.getUsage().completionTokens()); + assertEquals(20, chunk.getUsage().promptTokens()); + assertEquals(70, chunk.getUsage().totalTokens()); + + List choices = chunk.getChoices(); + assertEquals(1, choices.size()); + StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice choice = choices.get(0); + assertEquals("example_content", choice.delta().getContent()); + assertNull(choice.delta().getRefusal()); + assertEquals("assistant", choice.delta().getRole()); + assertEquals("stop", choice.finishReason()); + assertEquals(0, choice.index()); + + List toolCalls = choice.delta().getToolCalls(); + assertEquals(1, toolCalls.size()); + StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall toolCall = toolCalls.get(0); + assertEquals(1, toolCall.getIndex()); + assertEquals("tool_call_id", toolCall.getId()); + assertEquals("example_function_name", toolCall.getFunction().getName()); + assertEquals("example_arguments", toolCall.getFunction().getArguments()); + assertEquals("function", toolCall.getType()); + } catch (IOException e) { + fail(); + } + } + + public void testJsonLiteralCornerCases() { + String json = """ + { + "id": "example_id", + "choices": [ + { + "delta": { + "content": null, + "refusal": null, + "role": "assistant", + "tool_calls": [] + }, + "finish_reason": null, + "index": 0 + }, + { + "delta": { + "content": "example_content", + "refusal": "example_refusal", + "role": "user", + "tool_calls": [ + { + "index": 1, + "function": { + "name": "example_function_name" + }, + "type": "function" + } + ] + }, + "finish_reason": "stop", + "index": 1 + } + ], + "model": "example_model", + "object": "chat.completion.chunk", + "usage": null + } + """; + // Parse the JSON + XContentParserConfiguration parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler( + LoggingDeprecationHandler.INSTANCE + ); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, json)) { + StreamingUnifiedChatCompletionResults.ChatCompletionChunk chunk = OpenAiUnifiedStreamingProcessor.ChatCompletionChunkParser + .parse(parser); + + // Assertions to verify the parsed object + assertEquals("example_id", chunk.getId()); + assertEquals("example_model", chunk.getModel()); + assertEquals("chat.completion.chunk", chunk.getObject()); + assertNull(chunk.getUsage()); + + List choices = chunk.getChoices(); + assertEquals(2, choices.size()); + + // First choice assertions + StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice firstChoice = choices.get(0); + assertNull(firstChoice.delta().getContent()); + assertNull(firstChoice.delta().getRefusal()); + assertEquals("assistant", firstChoice.delta().getRole()); + assertTrue(firstChoice.delta().getToolCalls().isEmpty()); + assertNull(firstChoice.finishReason()); + assertEquals(0, firstChoice.index()); + + // Second choice assertions + StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice secondChoice = choices.get(1); + assertEquals("example_content", secondChoice.delta().getContent()); + assertEquals("example_refusal", secondChoice.delta().getRefusal()); + assertEquals("user", secondChoice.delta().getRole()); + assertEquals("stop", secondChoice.finishReason()); + assertEquals(1, secondChoice.index()); + + List toolCalls = secondChoice.delta() + .getToolCalls(); + assertEquals(1, toolCalls.size()); + StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall toolCall = toolCalls.get(0); + assertEquals(1, toolCall.getIndex()); + 
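// Context for the assertions that follow: the corner-case payload above deliberately omits the
// tool call "id" and the function "arguments" fields, so the parser is expected to surface them
// as null rather than reject the chunk. With Elasticsearch's object parsers such fields are
// typically declared as optional arguments; a sketch of the assumed declaration (not the actual
// ChatCompletionChunkParser code):
//
//     PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("id"));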
assertNull(toolCall.getId()); + assertEquals("example_function_name", toolCall.getFunction().getName()); + assertNull(toolCall.getFunction().getArguments()); + assertEquals("function", toolCall.getType()); + } catch (IOException e) { + fail(); + } + } + + public void testOpenAiUnifiedStreamingProcessorParsing() throws IOException { + // Generate random values for the JSON fields + int toolCallIndex = randomIntBetween(0, 10); + String toolCallId = randomAlphaOfLength(5); + String toolCallFunctionName = randomAlphaOfLength(8); + String toolCallFunctionArguments = randomAlphaOfLength(10); + String toolCallType = "function"; + String toolCallJson = createToolCallJson(toolCallIndex, toolCallId, toolCallFunctionName, toolCallFunctionArguments, toolCallType); + + String choiceContent = randomAlphaOfLength(10); + String choiceRole = randomFrom("system", "user", "assistant", "tool"); + String choiceFinishReason = randomFrom("stop", "length", "tool_calls", "content_filter", "function_call", null); + int choiceIndex = randomIntBetween(0, 10); + String choiceJson = createChoiceJson(choiceContent, null, choiceRole, toolCallJson, choiceFinishReason, choiceIndex); + + int usageCompletionTokens = randomIntBetween(1, 100); + int usagePromptTokens = randomIntBetween(1, 100); + int usageTotalTokens = randomIntBetween(1, 200); + String usageJson = createUsageJson(usageCompletionTokens, usagePromptTokens, usageTotalTokens); + + String chatCompletionChunkId = randomAlphaOfLength(10); + String chatCompletionChunkModel = randomAlphaOfLength(5); + String chatCompletionChunkJson = createChatCompletionChunkJson( + chatCompletionChunkId, + choiceJson, + chatCompletionChunkModel, + "chat.completion.chunk", + usageJson + ); + + // Parse the JSON + XContentParserConfiguration parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler( + LoggingDeprecationHandler.INSTANCE + ); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, chatCompletionChunkJson)) { + StreamingUnifiedChatCompletionResults.ChatCompletionChunk chunk = OpenAiUnifiedStreamingProcessor.ChatCompletionChunkParser + .parse(parser); + + // Assertions to verify the parsed object + assertEquals(chatCompletionChunkId, chunk.getId()); + assertEquals(chatCompletionChunkModel, chunk.getModel()); + assertEquals("chat.completion.chunk", chunk.getObject()); + assertNotNull(chunk.getUsage()); + assertEquals(usageCompletionTokens, chunk.getUsage().completionTokens()); + assertEquals(usagePromptTokens, chunk.getUsage().promptTokens()); + assertEquals(usageTotalTokens, chunk.getUsage().totalTokens()); + + List choices = chunk.getChoices(); + assertEquals(1, choices.size()); + StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice choice = choices.get(0); + assertEquals(choiceContent, choice.delta().getContent()); + assertNull(choice.delta().getRefusal()); + assertEquals(choiceRole, choice.delta().getRole()); + assertEquals(choiceFinishReason, choice.finishReason()); + assertEquals(choiceIndex, choice.index()); + + List toolCalls = choice.delta().getToolCalls(); + assertEquals(1, toolCalls.size()); + StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice.Delta.ToolCall toolCall = toolCalls.get(0); + assertEquals(toolCallIndex, toolCall.getIndex()); + assertEquals(toolCallId, toolCall.getId()); + assertEquals(toolCallFunctionName, toolCall.getFunction().getName()); + assertEquals(toolCallFunctionArguments, toolCall.getFunction().getArguments()); + assertEquals(toolCallType, 
toolCall.getType()); + } + } + + public void testOpenAiUnifiedStreamingProcessorParsingWithNullFields() throws IOException { + // JSON with null fields + int choiceIndex = randomIntBetween(0, 10); + String choiceJson = createChoiceJson(null, null, null, "", null, choiceIndex); + + String chatCompletionChunkId = randomAlphaOfLength(10); + String chatCompletionChunkModel = randomAlphaOfLength(5); + String chatCompletionChunkJson = createChatCompletionChunkJson( + chatCompletionChunkId, + choiceJson, + chatCompletionChunkModel, + "chat.completion.chunk", + null + ); + + // Parse the JSON + XContentParserConfiguration parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler( + LoggingDeprecationHandler.INSTANCE + ); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, chatCompletionChunkJson)) { + StreamingUnifiedChatCompletionResults.ChatCompletionChunk chunk = OpenAiUnifiedStreamingProcessor.ChatCompletionChunkParser + .parse(parser); + + // Assertions to verify the parsed object + assertEquals(chatCompletionChunkId, chunk.getId()); + assertEquals(chatCompletionChunkModel, chunk.getModel()); + assertEquals("chat.completion.chunk", chunk.getObject()); + assertNull(chunk.getUsage()); + + List choices = chunk.getChoices(); + assertEquals(1, choices.size()); + StreamingUnifiedChatCompletionResults.ChatCompletionChunk.Choice choice = choices.get(0); + assertNull(choice.delta().getContent()); + assertNull(choice.delta().getRefusal()); + assertNull(choice.delta().getRole()); + assertNull(choice.finishReason()); + assertEquals(choiceIndex, choice.index()); + assertTrue(choice.delta().getToolCalls().isEmpty()); + } + } + + private String createToolCallJson(int index, String id, String functionName, String functionArguments, String type) { + return Strings.format(""" + { + "index": %d, + "id": "%s", + "function": { + "name": "%s", + "arguments": "%s" + }, + "type": "%s" + } + """, index, id, functionName, functionArguments, type); + } + + private String createChoiceJson(String content, String refusal, String role, String toolCallsJson, String finishReason, int index) { + if (role == null) { + return Strings.format( + """ + { + "delta": { + "content": %s, + "refusal": %s, + "tool_calls": [%s] + }, + "finish_reason": %s, + "index": %d + } + """, + content != null ? "\"" + content + "\"" : "null", + refusal != null ? "\"" + refusal + "\"" : "null", + toolCallsJson, + finishReason != null ? "\"" + finishReason + "\"" : "null", + index + ); + } else { + return Strings.format( + """ + { + "delta": { + "content": %s, + "refusal": %s, + "role": %s, + "tool_calls": [%s] + }, + "finish_reason": %s, + "index": %d + } + """, + content != null ? "\"" + content + "\"" : "null", + refusal != null ? "\"" + refusal + "\"" : "null", + role != null ? "\"" + role + "\"" : "null", + toolCallsJson, + finishReason != null ? 
"\"" + finishReason + "\"" : "null", + index + ); + } + } + + private String createChatCompletionChunkJson(String id, String choicesJson, String model, String object, String usageJson) { + if (usageJson != null) { + return Strings.format(""" + { + "id": "%s", + "choices": [%s], + "model": "%s", + "object": "%s", + "usage": %s + } + """, id, choicesJson, model, object, usageJson); + } else { + return Strings.format(""" + { + "id": "%s", + "choices": [%s], + "model": "%s", + "object": "%s" + } + """, id, choicesJson, model, object); + } + } + + private String createUsageJson(int completionTokens, int promptTokens, int totalTokens) { + return Strings.format(""" + { + "completion_tokens": %d, + "prompt_tokens": %d, + "total_tokens": %d + } + """, completionTokens, promptTokens, totalTokens); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googleaistudio/completion/GoogleAiStudioCompletionRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googleaistudio/completion/GoogleAiStudioCompletionRequestTests.java index 7ffa8940ad6be..065dfee577a82 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googleaistudio/completion/GoogleAiStudioCompletionRequestTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googleaistudio/completion/GoogleAiStudioCompletionRequestTests.java @@ -10,7 +10,7 @@ import org.apache.http.client.methods.HttpPost; import org.elasticsearch.common.Strings; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.ChatCompletionInput; import org.elasticsearch.xpack.inference.external.request.googleaistudio.GoogleAiStudioCompletionRequest; import org.elasticsearch.xpack.inference.services.googleaistudio.completion.GoogleAiStudioCompletionModelTests; @@ -72,7 +72,7 @@ public void testTruncationInfo_ReturnsNull() { assertNull(request.getTruncationInfo()); } - private static DocumentsOnlyInput listOf(String... input) { - return new DocumentsOnlyInput(List.of(input)); + private static ChatCompletionInput listOf(String... input) { + return new ChatCompletionInput(List.of(input)); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntityTests.java deleted file mode 100644 index 9d5492f9e9516..0000000000000 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestEntityTests.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.inference.external.request.openai; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentType; - -import java.io.IOException; -import java.util.List; - -import static org.hamcrest.CoreMatchers.is; - -public class OpenAiChatCompletionRequestEntityTests extends ESTestCase { - - public void testXContent_WritesUserWhenDefined() throws IOException { - var entity = new OpenAiChatCompletionRequestEntity(List.of("abc"), "model", "user", false); - - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); - entity.toXContent(builder, null); - String xContentResult = Strings.toString(builder); - - assertThat(xContentResult, is(""" - {"messages":[{"role":"user","content":"abc"}],"model":"model","n":1,"user":"user"}""")); - - } - - public void testXContent_DoesNotWriteUserWhenItIsNull() throws IOException { - var entity = new OpenAiChatCompletionRequestEntity(List.of("abc"), "model", null, false); - - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); - entity.toXContent(builder, null); - String xContentResult = Strings.toString(builder); - - assertThat(xContentResult, is(""" - {"messages":[{"role":"user","content":"abc"}],"model":"model","n":1}""")); - } - - public void testXContent_ThrowsIfModelIsNull() { - assertThrows(NullPointerException.class, () -> new OpenAiChatCompletionRequestEntity(List.of("abc"), null, "user", false)); - } - - public void testXContent_ThrowsIfMessagesAreNull() { - assertThrows(NullPointerException.class, () -> new OpenAiChatCompletionRequestEntity(null, "model", "user", false)); - } -} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..f945c154ea234 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequestEntityTests.java @@ -0,0 +1,856 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.openai; + +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Strings; +import org.elasticsearch.inference.UnifiedCompletionRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModel; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Random; + +import static org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModelTests.createChatCompletionModel; +import static org.hamcrest.Matchers.equalTo; + +public class OpenAiUnifiedChatCompletionRequestEntityTests extends ESTestCase { + + // 1. Basic Serialization + // Test with minimal required fields to ensure basic serialization works. + public void testBasicSerialization() throws IOException { + UnifiedCompletionRequest.Message message = new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString("Hello, world!"), + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + null, + null, + null + ); + var messageList = new ArrayList(); + messageList.add(message); + UnifiedCompletionRequest unifiedRequest = new UnifiedCompletionRequest(messageList, null, null, null, null, null, null, null); + + UnifiedChatInput unifiedChatInput = new UnifiedChatInput(unifiedRequest, true); + OpenAiChatCompletionModel model = createChatCompletionModel("test-url", "organizationId", "api-key", "test-endpoint", null); + + OpenAiUnifiedChatCompletionRequestEntity entity = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInput, model); + + XContentBuilder builder = JsonXContent.contentBuilder(); + entity.toXContent(builder, ToXContent.EMPTY_PARAMS); + + String jsonString = Strings.toString(builder); + String expectedJson = """ + { + "messages": [ + { + "content": "Hello, world!", + "role": "user" + } + ], + "model": "test-endpoint", + "n": 1, + "stream": true, + "stream_options": { + "include_usage": true + } + } + """; + assertJsonEquals(jsonString, expectedJson); + } + + // 2. Serialization with All Fields + // Test with all possible fields populated to ensure complete serialization. 
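// Every test in this class exercises the same XContent round-trip shown in testBasicSerialization
// above: serialize the entity into a JsonXContent builder, render it to a string, and compare the
// result as an ordered map. A minimal sketch of that shared pattern, built only from calls that
// already appear in this class (the helper name itself is hypothetical):

    private static String toJson(OpenAiUnifiedChatCompletionRequestEntity entity) throws IOException {
        XContentBuilder builder = JsonXContent.contentBuilder(); // fresh builder per serialization
        entity.toXContent(builder, ToXContent.EMPTY_PARAMS);     // the entity writes the full request body
        return Strings.toString(builder);                        // render the builder to a JSON string
    }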
+ public void testSerializationWithAllFields() throws IOException { + // Create a message with all fields populated + UnifiedCompletionRequest.Message message = new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString("Hello, world!"), + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + "name", + "tool_call_id", + Collections.singletonList( + new UnifiedCompletionRequest.ToolCall( + "id", + new UnifiedCompletionRequest.ToolCall.FunctionField("arguments", "function_name"), + "type" + ) + ) + ); + + // Create a tool with all fields populated + UnifiedCompletionRequest.Tool tool = new UnifiedCompletionRequest.Tool( + "type", + new UnifiedCompletionRequest.Tool.FunctionField( + "Fetches the weather in the given location", + "get_weather", + createParameters(), + true + ) + ); + var messageList = new ArrayList(); + messageList.add(message); + // Create the unified request with all fields populated + UnifiedCompletionRequest unifiedRequest = new UnifiedCompletionRequest( + messageList, + "model", + 100L, // maxCompletionTokens + Collections.singletonList("stop"), + 0.9f, // temperature + new UnifiedCompletionRequest.ToolChoiceString("tool_choice"), + Collections.singletonList(tool), + 0.8f // topP + ); + + // Create the unified chat input + UnifiedChatInput unifiedChatInput = new UnifiedChatInput(unifiedRequest, true); + + OpenAiChatCompletionModel model = createChatCompletionModel("test-endpoint", "organizationId", "api-key", "model-name", null); + + // Create the entity + OpenAiUnifiedChatCompletionRequestEntity entity = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInput, model); + + // Serialize to XContent + XContentBuilder builder = JsonXContent.contentBuilder(); + entity.toXContent(builder, ToXContent.EMPTY_PARAMS); + + // Convert to string and verify + String jsonString = Strings.toString(builder); + String expectedJson = """ + { + "messages": [ + { + "content": "Hello, world!", + "role": "user", + "name": "name", + "tool_call_id": "tool_call_id", + "tool_calls": [ + { + "id": "id", + "function": { + "arguments": "arguments", + "name": "function_name" + }, + "type": "type" + } + ] + } + ], + "model": "model-name", + "max_completion_tokens": 100, + "n": 1, + "stop": ["stop"], + "temperature": 0.9, + "tool_choice": "tool_choice", + "tools": [ + { + "type": "type", + "function": { + "description": "Fetches the weather in the given location", + "name": "get_weather", + "parameters": { + "type": "object", + "properties": { + "location": { + "description": "The location to get the weather for", + "type": "string" + }, + "unit": { + "description": "The unit to return the temperature in", + "type": "string", + "enum": ["F", "C"] + } + }, + "additionalProperties": false, + "required": ["location", "unit"] + }, + "strict": true + } + } + ], + "top_p": 0.8, + "stream": true, + "stream_options": { + "include_usage": true + } + } + """; + assertJsonEquals(jsonString, expectedJson); + + } + + // 3. Serialization with Null Optional Fields + // Test with optional fields set to null to ensure they are correctly omitted from the output. 
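// A detail worth noting in testSerializationWithAllFields above: the request object carried
// "model" as its model field, yet the expected body contains "model-name" — the id configured on
// the OpenAiChatCompletionModel. The entity evidently resolves the model id from the service
// configuration rather than from the UnifiedCompletionRequest, so a per-request model override
// would presumably have to be applied to the model object before the entity is constructed.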
+ public void testSerializationWithNullOptionalFields() throws IOException { + // Create a message with minimal required fields + UnifiedCompletionRequest.Message message = new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString("Hello, world!"), + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + null, + null, + null + ); + var messageList = new ArrayList(); + messageList.add(message); + + // Create the unified request with optional fields set to null + UnifiedCompletionRequest unifiedRequest = new UnifiedCompletionRequest( + messageList, + null, // model + null, // maxCompletionTokens + null, // stop + null, // temperature + null, // toolChoice + null, // tools + null // topP + ); + + // Create the unified chat input + UnifiedChatInput unifiedChatInput = new UnifiedChatInput(unifiedRequest, true); + + OpenAiChatCompletionModel model = createChatCompletionModel("test-endpoint", "organizationId", "api-key", "model-name", null); + + // Create the entity + OpenAiUnifiedChatCompletionRequestEntity entity = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInput, model); + + // Serialize to XContent + XContentBuilder builder = JsonXContent.contentBuilder(); + entity.toXContent(builder, ToXContent.EMPTY_PARAMS); + + // Convert to string and verify + String jsonString = Strings.toString(builder); + String expectedJson = """ + { + "messages": [ + { + "content": "Hello, world!", + "role": "user" + } + ], + "model": "model-name", + "n": 1, + "stream": true, + "stream_options": { + "include_usage": true + } + } + """; + assertJsonEquals(jsonString, expectedJson); + } + + // 4. Serialization with Empty Lists + // Test with fields that are lists set to empty lists to ensure they are correctly serialized. + public void testSerializationWithEmptyLists() throws IOException { + // Create a message with minimal required fields + UnifiedCompletionRequest.Message message = new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString("Hello, world!"), + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + null, + null, + Collections.emptyList() // empty toolCalls list + ); + var messageList = new ArrayList(); + messageList.add(message); + // Create the unified request with empty lists + UnifiedCompletionRequest unifiedRequest = new UnifiedCompletionRequest( + messageList, + null, // model + null, // maxCompletionTokens + Collections.emptyList(), // empty stop list + null, // temperature + null, // toolChoice + Collections.emptyList(), // empty tools list + null // topP + ); + + // Create the unified chat input + UnifiedChatInput unifiedChatInput = new UnifiedChatInput(unifiedRequest, true); + + OpenAiChatCompletionModel model = createChatCompletionModel("test-endpoint", "organizationId", "api-key", "model-name", null); + + // Create the entity + OpenAiUnifiedChatCompletionRequestEntity entity = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInput, model); + + // Serialize to XContent + XContentBuilder builder = JsonXContent.contentBuilder(); + entity.toXContent(builder, ToXContent.EMPTY_PARAMS); + + // Convert to string and verify + String jsonString = Strings.toString(builder); + String expectedJson = """ + { + "messages": [ + { + "content": "Hello, world!", + "role": "user", + "tool_calls": [] + } + ], + "model": "model-name", + "n": 1, + "stream": true, + "stream_options": { + "include_usage": true + } + } + """; + assertJsonEquals(jsonString, expectedJson); + } + + // 5. 
Serialization with Nested Objects + // Test with nested objects (e.g., toolCalls, toolChoice, tool) to ensure they are correctly serialized. + public void testSerializationWithNestedObjects() throws IOException { + Random random = Randomness.get(); + + // Generate random values + String randomContent = "Hello, world! " + random.nextInt(1000); + String randomName = "name" + random.nextInt(1000); + String randomToolCallId = "tool_call_id" + random.nextInt(1000); + String randomArguments = "arguments" + random.nextInt(1000); + String randomFunctionName = "function_name" + random.nextInt(1000); + String randomType = "type" + random.nextInt(1000); + String randomModel = "model" + random.nextInt(1000); + String randomStop = "stop" + random.nextInt(1000); + float randomTemperature = (float) ((float) Math.round(0.5d + (double) random.nextFloat() * 0.5d * 100000d) / 100000d); + float randomTopP = (float) ((float) Math.round(0.5d + (double) random.nextFloat() * 0.5d * 100000d) / 100000d); + + // Create a message with nested toolCalls + UnifiedCompletionRequest.Message message = new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString(randomContent), + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + randomName, + randomToolCallId, + Collections.singletonList( + new UnifiedCompletionRequest.ToolCall( + "id", + new UnifiedCompletionRequest.ToolCall.FunctionField(randomArguments, randomFunctionName), + randomType + ) + ) + ); + + // Create a tool with nested function fields + UnifiedCompletionRequest.Tool tool = new UnifiedCompletionRequest.Tool( + randomType, + new UnifiedCompletionRequest.Tool.FunctionField( + "Fetches the weather in the given location", + "get_weather", + createParameters(), + true + ) + ); + var messageList = new ArrayList(); + messageList.add(message); + // Create the unified request with nested objects + UnifiedCompletionRequest unifiedRequest = new UnifiedCompletionRequest( + messageList, + randomModel, + 100L, // maxCompletionTokens + Collections.singletonList(randomStop), + randomTemperature, // temperature + new UnifiedCompletionRequest.ToolChoiceObject( + randomType, + new UnifiedCompletionRequest.ToolChoiceObject.FunctionField(randomFunctionName) + ), + Collections.singletonList(tool), + randomTopP // topP + ); + + // Create the unified chat input + UnifiedChatInput unifiedChatInput = new UnifiedChatInput(unifiedRequest, true); + + OpenAiChatCompletionModel model = createChatCompletionModel("test-endpoint", "organizationId", "api-key", randomModel, null); + + // Create the entity + OpenAiUnifiedChatCompletionRequestEntity entity = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInput, model); + + // Serialize to XContent + XContentBuilder builder = JsonXContent.contentBuilder(); + entity.toXContent(builder, ToXContent.EMPTY_PARAMS); + + // Convert to string and verify + String jsonString = Strings.toString(builder); + // Expected JSON should be dynamically generated based on random values + String expectedJson = String.format( + Locale.US, + """ + { + "messages": [ + { + "content": "%s", + "role": "user", + "name": "%s", + "tool_call_id": "%s", + "tool_calls": [ + { + "id": "id", + "function": { + "arguments": "%s", + "name": "%s" + }, + "type": "%s" + } + ] + } + ], + "model": "%s", + "max_completion_tokens": 100, + "n": 1, + "stop": ["%s"], + "temperature": %.5f, + "tool_choice": { + "type": "%s", + "function": { + "name": "%s" + } + }, + "tools": [ + { + "type": "%s", + "function": { + "description": "Fetches the weather in the 
given location", + "name": "get_weather", + "parameters": { + "type": "object", + "properties": { + "unit": { + "description": "The unit to return the temperature in", + "type": "string", + "enum": ["F", "C"] + }, + "location": { + "description": "The location to get the weather for", + "type": "string" + } + }, + "additionalProperties": false, + "required": ["location", "unit"] + }, + "strict": true + } + } + ], + "top_p": %.5f, + "stream": true, + "stream_options": { + "include_usage": true + } + } + """, + randomContent, + randomName, + randomToolCallId, + randomArguments, + randomFunctionName, + randomType, + randomModel, + randomStop, + randomTemperature, + randomType, + randomFunctionName, + randomType, + randomTopP + ); + assertJsonEquals(jsonString, expectedJson); + } + + // 6. Serialization with Different Content Types + // Test with different content types in messages (e.g., ContentString, ContentObjects) to ensure they are correctly serialized. + public void testSerializationWithDifferentContentTypes() throws IOException { + Random random = Randomness.get(); + + // Generate random values for ContentString + String randomContentString = "Hello, world! " + random.nextInt(1000); + + // Generate random values for ContentObjects + String randomText = "Random text " + random.nextInt(1000); + String randomType = "type" + random.nextInt(1000); + UnifiedCompletionRequest.ContentObject contentObject = new UnifiedCompletionRequest.ContentObject(randomText, randomType); + + var contentObjectsList = new ArrayList(); + contentObjectsList.add(contentObject); + UnifiedCompletionRequest.ContentObjects contentObjects = new UnifiedCompletionRequest.ContentObjects(contentObjectsList); + + // Create messages with different content types + UnifiedCompletionRequest.Message messageWithString = new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString(randomContentString), + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + null, + null, + null + ); + + UnifiedCompletionRequest.Message messageWithObjects = new UnifiedCompletionRequest.Message( + contentObjects, + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + null, + null, + null + ); + var messageList = new ArrayList(); + messageList.add(messageWithString); + messageList.add(messageWithObjects); + + // Create the unified request with both types of messages + UnifiedCompletionRequest unifiedRequest = UnifiedCompletionRequest.of(messageList); + + // Create the unified chat input + UnifiedChatInput unifiedChatInput = new UnifiedChatInput(unifiedRequest, true); + + OpenAiChatCompletionModel model = createChatCompletionModel("test-endpoint", "organizationId", "api-key", "model-name", null); + + // Create the entity + OpenAiUnifiedChatCompletionRequestEntity entity = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInput, model); + + // Serialize to XContent + XContentBuilder builder = JsonXContent.contentBuilder(); + entity.toXContent(builder, ToXContent.EMPTY_PARAMS); + + // Convert to string and verify + String jsonString = Strings.toString(builder); + String expectedJson = String.format(Locale.US, """ + { + "messages": [ + { + "content": "%s", + "role": "user" + }, + { + "content": [ + { + "text": "%s", + "type": "%s" + } + ], + "role": "user" + } + ], + "model": "model-name", + "n": 1, + "stream": true, + "stream_options": { + "include_usage": true + } + } + """, randomContentString, randomText, randomType); + assertJsonEquals(jsonString, expectedJson); + } + + // 7. 
Serialization with Special Characters + // Test with special characters in string fields to ensure they are correctly escaped and serialized. + public void testSerializationWithSpecialCharacters() throws IOException { + // Create a message with special characters + UnifiedCompletionRequest.Message message = new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString("Hello, world! \n \"Special\" characters: \t \\ /"), + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + "name\nwith\nnewlines", + "tool_call_id\twith\ttabs", + Collections.singletonList( + new UnifiedCompletionRequest.ToolCall( + "id\\with\\backslashes", + new UnifiedCompletionRequest.ToolCall.FunctionField("arguments\"with\"quotes", "function_name/with/slashes"), + "type" + ) + ) + ); + var messageList = new ArrayList(); + messageList.add(message); + // Create the unified request + UnifiedCompletionRequest unifiedRequest = new UnifiedCompletionRequest( + messageList, + null, // model + null, // maxCompletionTokens + null, // stop + null, // temperature + null, // toolChoice + null, // tools + null // topP + ); + + // Create the unified chat input + UnifiedChatInput unifiedChatInput = new UnifiedChatInput(unifiedRequest, true); + + OpenAiChatCompletionModel model = createChatCompletionModel("test-endpoint", "organizationId", "api-key", "model-name", null); + + // Create the entity + OpenAiUnifiedChatCompletionRequestEntity entity = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInput, model); + + // Serialize to XContent + XContentBuilder builder = JsonXContent.contentBuilder(); + entity.toXContent(builder, ToXContent.EMPTY_PARAMS); + + // Convert to string and verify + String jsonString = Strings.toString(builder); + String expectedJson = """ + { + "messages": [ + { + "content": "Hello, world! \\n \\"Special\\" characters: \\t \\\\ /", + "role": "user", + "name": "name\\nwith\\nnewlines", + "tool_call_id": "tool_call_id\\twith\\ttabs", + "tool_calls": [ + { + "id": "id\\\\with\\\\backslashes", + "function": { + "arguments": "arguments\\"with\\"quotes", + "name": "function_name/with/slashes" + }, + "type": "type" + } + ] + } + ], + "model": "model-name", + "n": 1, + "stream": true, + "stream_options": { + "include_usage": true + } + } + """; + assertJsonEquals(jsonString, expectedJson); + } + + // 8. Serialization with Boolean Fields + // Test with boolean fields (stream) set to both true and false to ensure they are correctly serialized. 
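// The expectations throughout this class encode the stream-flag contract verified by
// testSerializationWithBooleanFields below: "stream" is always written, while "stream_options"
// is attached only when streaming, asking OpenAI to append a final usage chunk. A sketch of
// writer logic matching those expectations (an assumed shape, not the production implementation):

    private static void writeStreamFields(XContentBuilder builder, boolean stream) throws IOException {
        builder.field("stream", stream);           // present for both streaming and non-streaming bodies
        if (stream) {
            builder.startObject("stream_options"); // only streaming requests carry stream_options
            builder.field("include_usage", true);  // matches the "include_usage": true expectation
            builder.endObject();
        }
    }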
+ public void testSerializationWithBooleanFields() throws IOException { + // Create a message with minimal required fields + UnifiedCompletionRequest.Message message = new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString("Hello, world!"), + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + null, + null, + null + ); + var messageList = new ArrayList(); + messageList.add(message); + // Create the unified request + UnifiedCompletionRequest unifiedRequest = new UnifiedCompletionRequest( + messageList, + null, // model + null, // maxCompletionTokens + null, // stop + null, // temperature + null, // toolChoice + null, // tools + null // topP + ); + + OpenAiChatCompletionModel model = createChatCompletionModel("test-endpoint", "organizationId", "api-key", "model-name", null); + + // Test with stream set to true + UnifiedChatInput unifiedChatInputTrue = new UnifiedChatInput(unifiedRequest, true); + OpenAiUnifiedChatCompletionRequestEntity entityTrue = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInputTrue, model); + + XContentBuilder builderTrue = JsonXContent.contentBuilder(); + entityTrue.toXContent(builderTrue, ToXContent.EMPTY_PARAMS); + + String jsonStringTrue = Strings.toString(builderTrue); + String expectedJsonTrue = """ + { + "messages": [ + { + "content": "Hello, world!", + "role": "user" + } + ], + "model": "model-name", + "n": 1, + "stream": true, + "stream_options": { + "include_usage": true + } + } + """; + assertJsonEquals(expectedJsonTrue, jsonStringTrue); + + // Test with stream set to false + UnifiedChatInput unifiedChatInputFalse = new UnifiedChatInput(unifiedRequest, false); + OpenAiUnifiedChatCompletionRequestEntity entityFalse = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInputFalse, model); + + XContentBuilder builderFalse = JsonXContent.contentBuilder(); + entityFalse.toXContent(builderFalse, ToXContent.EMPTY_PARAMS); + + String jsonStringFalse = Strings.toString(builderFalse); + String expectedJsonFalse = """ + { + "messages": [ + { + "content": "Hello, world!", + "role": "user" + } + ], + "model": "model-name", + "n": 1, + "stream": false + } + """; + assertJsonEquals(expectedJsonFalse, jsonStringFalse); + } + + // 9. Serialization with Missing Required Fields + // Test with missing required fields to ensure appropriate exceptions are thrown. 
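// The two failure tests below assert the exception with a try/serialize/fail block; the same
// check could be phrased with ESTestCase.expectThrows — a sketch under the same assumption that
// serializing a null message content blows up before any JSON is produced:
//
//     expectThrows(Exception.class, () -> {
//         XContentBuilder builder = JsonXContent.contentBuilder();
//         entity.toXContent(builder, ToXContent.EMPTY_PARAMS);
//     });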
+ public void testSerializationWithMissingRequiredFields() { + // Create a message with missing content (required field) + UnifiedCompletionRequest.Message message = new UnifiedCompletionRequest.Message( + null, // missing content + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + null, + null, + null + ); + var messageList = new ArrayList(); + messageList.add(message); + // Create the unified request + UnifiedCompletionRequest unifiedRequest = new UnifiedCompletionRequest( + messageList, + null, // model + null, // maxCompletionTokens + null, // stop + null, // temperature + null, // toolChoice + null, // tools + null // topP + ); + + // Create the unified chat input + UnifiedChatInput unifiedChatInput = new UnifiedChatInput(unifiedRequest, true); + + OpenAiChatCompletionModel model = createChatCompletionModel("test-endpoint", "organizationId", "api-key", "model-name", null); + + // Create the entity + OpenAiUnifiedChatCompletionRequestEntity entity = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInput, model); + + // Attempt to serialize to XContent and expect an exception + try { + XContentBuilder builder = JsonXContent.contentBuilder(); + entity.toXContent(builder, ToXContent.EMPTY_PARAMS); + fail("Expected an exception due to missing required fields"); + } catch (NullPointerException | IOException e) { + // Expected exception + } + } + + // 10. Serialization with Mixed Valid and Invalid Data + // Test with a mix of valid and invalid data to ensure the serializer handles it gracefully. + public void testSerializationWithMixedValidAndInvalidData() throws IOException { + // Create a valid message + UnifiedCompletionRequest.Message validMessage = new UnifiedCompletionRequest.Message( + new UnifiedCompletionRequest.ContentString("Valid content"), + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + "validName", + "validToolCallId", + Collections.singletonList( + new UnifiedCompletionRequest.ToolCall( + "validId", + new UnifiedCompletionRequest.ToolCall.FunctionField("validArguments", "validFunctionName"), + "validType" + ) + ) + ); + + // Create an invalid message with null content + UnifiedCompletionRequest.Message invalidMessage = new UnifiedCompletionRequest.Message( + null, // invalid content + OpenAiUnifiedChatCompletionRequestEntity.USER_FIELD, + "invalidName", + "invalidToolCallId", + Collections.singletonList( + new UnifiedCompletionRequest.ToolCall( + "invalidId", + new UnifiedCompletionRequest.ToolCall.FunctionField("invalidArguments", "invalidFunctionName"), + "invalidType" + ) + ) + ); + var messageList = new ArrayList(); + messageList.add(validMessage); + messageList.add(invalidMessage); + // Create the unified request with both valid and invalid messages + UnifiedCompletionRequest unifiedRequest = new UnifiedCompletionRequest( + messageList, + "model-name", + 100L, // maxCompletionTokens + Collections.singletonList("stop"), + 0.9f, // temperature + new UnifiedCompletionRequest.ToolChoiceString("tool_choice"), + Collections.singletonList( + new UnifiedCompletionRequest.Tool( + "type", + new UnifiedCompletionRequest.Tool.FunctionField( + "Fetches the weather in the given location", + "get_weather", + createParameters(), + true + ) + ) + ), + 0.8f // topP + ); + + // Create the unified chat input + UnifiedChatInput unifiedChatInput = new UnifiedChatInput(unifiedRequest, true); + + OpenAiChatCompletionModel model = createChatCompletionModel("test-endpoint", "organizationId", "api-key", "model-name", null); + + // Create the entity + 
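// Both the valid and the invalid message end up in the entity built below; serialization is
// still expected to fail because messages are presumably written in list order, so the writer
// eventually reaches the second message's null content.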
OpenAiUnifiedChatCompletionRequestEntity entity = new OpenAiUnifiedChatCompletionRequestEntity(unifiedChatInput, model); + + // Serialize to XContent and verify + try { + XContentBuilder builder = JsonXContent.contentBuilder(); + entity.toXContent(builder, ToXContent.EMPTY_PARAMS); + fail("Expected an exception due to invalid data"); + } catch (NullPointerException | IOException e) { + // Expected exception + } + } + + public static Map createParameters() { + Map parameters = new LinkedHashMap<>(); + parameters.put("type", "object"); + + Map properties = new HashMap<>(); + + Map location = new HashMap<>(); + location.put("type", "string"); + location.put("description", "The location to get the weather for"); + properties.put("location", location); + + Map unit = new HashMap<>(); + unit.put("type", "string"); + unit.put("description", "The unit to return the temperature in"); + unit.put("enum", new String[] { "F", "C" }); + properties.put("unit", unit); + + parameters.put("properties", properties); + parameters.put("additionalProperties", false); + parameters.put("required", new String[] { "location", "unit" }); + + return parameters; + } + + private void assertJsonEquals(String actual, String expected) throws IOException { + try ( + var actualParser = createParser(JsonXContent.jsonXContent, actual); + var expectedParser = createParser(JsonXContent.jsonXContent, expected) + ) { + assertThat(actualParser.mapOrdered(), equalTo(expectedParser.mapOrdered())); + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequestTests.java similarity index 75% rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequestTests.java index b6ebfd02941f3..2be12c9b12e0b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiChatCompletionRequestTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/openai/OpenAiUnifiedChatCompletionRequestTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput; import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModelTests; import java.io.IOException; @@ -20,16 +21,16 @@ import java.util.Map; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; -import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiChatCompletionRequest.buildDefaultUri; +import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUnifiedChatCompletionRequest.buildDefaultUri; import static org.elasticsearch.xpack.inference.external.request.openai.OpenAiUtils.ORGANIZATION_HEADER; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -public class OpenAiChatCompletionRequestTests extends ESTestCase { +public class OpenAiUnifiedChatCompletionRequestTests extends ESTestCase { public void 
testCreateRequest_WithUrlOrganizationUserDefined() throws IOException { - var request = createRequest("www.google.com", "org", "secret", "abc", "model", "user"); + var request = createRequest("www.google.com", "org", "secret", "abc", "model", "user", true); var httpRequest = request.createHttpRequest(); assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); @@ -41,15 +42,27 @@ public void testCreateRequest_WithUrlOrganizationUserDefined() throws IOExceptio assertThat(httpPost.getLastHeader(ORGANIZATION_HEADER).getValue(), is("org")); var requestMap = entityAsMap(httpPost.getEntity().getContent()); - assertThat(requestMap, aMapWithSize(4)); + assertRequestMapWithUser(requestMap, "user"); + } + + private void assertRequestMapWithoutUser(Map requestMap) { + assertRequestMapWithUser(requestMap, null); + } + + private void assertRequestMapWithUser(Map requestMap, @Nullable String user) { + assertThat(requestMap, aMapWithSize(user != null ? 6 : 5)); assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); assertThat(requestMap.get("model"), is("model")); - assertThat(requestMap.get("user"), is("user")); + if (user != null) { + assertThat(requestMap.get("user"), is(user)); + } assertThat(requestMap.get("n"), is(1)); + assertTrue((Boolean) requestMap.get("stream")); + assertThat(requestMap.get("stream_options"), is(Map.of("include_usage", true))); } public void testCreateRequest_WithDefaultUrl() throws URISyntaxException, IOException { - var request = createRequest(null, "org", "secret", "abc", "model", "user"); + var request = createRequest(null, "org", "secret", "abc", "model", "user", true); var httpRequest = request.createHttpRequest(); assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); @@ -61,33 +74,27 @@ public void testCreateRequest_WithDefaultUrl() throws URISyntaxException, IOExce assertThat(httpPost.getLastHeader(ORGANIZATION_HEADER).getValue(), is("org")); var requestMap = entityAsMap(httpPost.getEntity().getContent()); - assertThat(requestMap, aMapWithSize(4)); - assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); - assertThat(requestMap.get("model"), is("model")); - assertThat(requestMap.get("user"), is("user")); - assertThat(requestMap.get("n"), is(1)); + assertRequestMapWithUser(requestMap, "user"); + } public void testCreateRequest_WithDefaultUrlAndWithoutUserOrganization() throws URISyntaxException, IOException { - var request = createRequest(null, null, "secret", "abc", "model", null); + var request = createRequest(null, null, "secret", "abc", "model", null, true); var httpRequest = request.createHttpRequest(); assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); var httpPost = (HttpPost) httpRequest.httpRequestBase(); - assertThat(httpPost.getURI().toString(), is(OpenAiChatCompletionRequest.buildDefaultUri().toString())); + assertThat(httpPost.getURI().toString(), is(OpenAiUnifiedChatCompletionRequest.buildDefaultUri().toString())); assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is("Bearer secret")); assertNull(httpPost.getLastHeader(ORGANIZATION_HEADER)); var requestMap = entityAsMap(httpPost.getEntity().getContent()); - assertThat(requestMap, aMapWithSize(3)); - assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); - assertThat(requestMap.get("model"), is("model")); - 
assertThat(requestMap.get("n"), is(1)); + assertRequestMapWithoutUser(requestMap); } - public void testCreateRequest_WithStreaming() throws URISyntaxException, IOException { + public void testCreateRequest_WithStreaming() throws IOException { var request = createRequest(null, null, "secret", "abc", "model", null, true); var httpRequest = request.createHttpRequest(); @@ -99,29 +106,31 @@ public void testCreateRequest_WithStreaming() throws URISyntaxException, IOExcep } public void testTruncate_DoesNotReduceInputTextSize() throws URISyntaxException, IOException { - var request = createRequest(null, null, "secret", "abcd", "model", null); + var request = createRequest(null, null, "secret", "abcd", "model", null, true); var truncatedRequest = request.truncate(); - assertThat(request.getURI().toString(), is(OpenAiChatCompletionRequest.buildDefaultUri().toString())); + assertThat(request.getURI().toString(), is(OpenAiUnifiedChatCompletionRequest.buildDefaultUri().toString())); var httpRequest = truncatedRequest.createHttpRequest(); assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); var httpPost = (HttpPost) httpRequest.httpRequestBase(); var requestMap = entityAsMap(httpPost.getEntity().getContent()); - assertThat(requestMap, aMapWithSize(3)); + assertThat(requestMap, aMapWithSize(5)); // We do not truncate for OpenAi chat completions assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abcd")))); assertThat(requestMap.get("model"), is("model")); assertThat(requestMap.get("n"), is(1)); + assertTrue((Boolean) requestMap.get("stream")); + assertThat(requestMap.get("stream_options"), is(Map.of("include_usage", true))); } public void testTruncationInfo_ReturnsNull() { - var request = createRequest(null, null, "secret", "abcd", "model", null); + var request = createRequest(null, null, "secret", "abcd", "model", null, true); assertNull(request.getTruncationInfo()); } - public static OpenAiChatCompletionRequest createRequest( + public static OpenAiUnifiedChatCompletionRequest createRequest( @Nullable String url, @Nullable String org, String apiKey, @@ -132,7 +141,7 @@ public static OpenAiChatCompletionRequest createRequest( return createRequest(url, org, apiKey, input, model, user, false); } - public static OpenAiChatCompletionRequest createRequest( + public static OpenAiUnifiedChatCompletionRequest createRequest( @Nullable String url, @Nullable String org, String apiKey, @@ -142,7 +151,7 @@ public static OpenAiChatCompletionRequest createRequest( boolean stream ) { var chatCompletionModel = OpenAiChatCompletionModelTests.createChatCompletionModel(url, org, apiKey, model, user); - return new OpenAiChatCompletionRequest(List.of(input), chatCompletionModel, stream); + return new OpenAiUnifiedChatCompletionRequest(new UnifiedChatInput(List.of(input), "user", stream), chatCompletionModel); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rest/BaseInferenceActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rest/BaseInferenceActionTests.java index 05a8d52be5df4..5528c80066b0a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rest/BaseInferenceActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rest/BaseInferenceActionTests.java @@ -8,11 +8,14 @@ package org.elasticsearch.xpack.inference.rest; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchStatusException; import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestRequestTests; import org.elasticsearch.rest.action.RestChunkedToXContentListener; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.test.rest.RestActionTestCase; @@ -26,6 +29,10 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.inference.rest.BaseInferenceAction.parseParams; +import static org.elasticsearch.xpack.inference.rest.BaseInferenceAction.parseTimeout; +import static org.elasticsearch.xpack.inference.rest.Paths.INFERENCE_ID; +import static org.elasticsearch.xpack.inference.rest.Paths.TASK_TYPE_OR_INFERENCE_ID; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -56,6 +63,42 @@ private static String route(String param) { return "_route/" + param; } + public void testParseParams_ExtractsInferenceIdAndTaskType() { + var params = parseParams( + RestRequestTests.contentRestRequest("{}", Map.of(INFERENCE_ID, "id", TASK_TYPE_OR_INFERENCE_ID, TaskType.COMPLETION.toString())) + ); + assertThat(params, is(new BaseInferenceAction.Params("id", TaskType.COMPLETION))); + } + + public void testParseParams_DefaultsToTaskTypeAny_WhenInferenceId_IsMissing() { + var params = parseParams( + RestRequestTests.contentRestRequest("{}", Map.of(TASK_TYPE_OR_INFERENCE_ID, TaskType.COMPLETION.toString())) + ); + assertThat(params, is(new BaseInferenceAction.Params("completion", TaskType.ANY))); + } + + public void testParseParams_ThrowsStatusException_WhenTaskTypeIsMissing() { + var e = expectThrows( + ElasticsearchStatusException.class, + () -> parseParams(RestRequestTests.contentRestRequest("{}", Map.of(INFERENCE_ID, "id"))) + ); + assertThat(e.getMessage(), is("Task type must not be null")); + } + + public void testParseTimeout_ReturnsTimeout() { + var timeout = parseTimeout( + RestRequestTests.contentRestRequest("{}", Map.of(InferenceAction.Request.TIMEOUT.getPreferredName(), "4s")) + ); + + assertThat(timeout, is(TimeValue.timeValueSeconds(4))); + } + + public void testParseTimeout_ReturnsDefaultTimeout() { + var timeout = parseTimeout(RestRequestTests.contentRestRequest("{}", Map.of())); + + assertThat(timeout, is(TimeValue.timeValueSeconds(30))); + } + public void testUsesDefaultTimeout() { SetOnce executeCalled = new SetOnce<>(); verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rest/RestUnifiedCompletionInferenceActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rest/RestUnifiedCompletionInferenceActionTests.java new file mode 100644 index 0000000000000..5acfe67b175df --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rest/RestUnifiedCompletionInferenceActionTests.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.inference.rest;
+
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.rest.AbstractRestChannel;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.test.rest.FakeRestRequest;
+import org.elasticsearch.test.rest.RestActionTestCase;
+import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xpack.core.inference.action.UnifiedCompletionAction;
+import org.junit.Before;
+
+import static org.elasticsearch.xpack.inference.rest.BaseInferenceActionTests.createResponse;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class RestUnifiedCompletionInferenceActionTests extends RestActionTestCase {
+
+    @Before
+    public void setUpAction() {
+        controller().registerHandler(new RestUnifiedCompletionInferenceAction());
+    }
+
+    public void testStreamIsTrue() {
+        SetOnce<Boolean> executeCalled = new SetOnce<>();
+        verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> {
+            assertThat(actionRequest, instanceOf(UnifiedCompletionAction.Request.class));
+
+            var request = (UnifiedCompletionAction.Request) actionRequest;
+            assertThat(request.isStreaming(), is(true));
+
+            executeCalled.set(true);
+            return createResponse();
+        }));
+
+        var requestBody = """
+            {
+                "messages": [
+                    {
+                        "content": "abc",
+                        "role": "user"
+                    }
+                ]
+            }
+            """;
+
+        RestRequest inferenceRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST)
+            .withPath("_inference/completion/test/_unified")
+            .withContent(new BytesArray(requestBody), XContentType.JSON)
+            .build();
+
+        final SetOnce<RestResponse> responseSetOnce = new SetOnce<>();
+        dispatchRequest(inferenceRequest, new AbstractRestChannel(inferenceRequest, true) {
+            @Override
+            public void sendResponse(RestResponse response) {
+                responseSetOnce.set(response);
+            }
+        });
+
+        // the response content will be null when there is no error
+        assertNull(responseSetOnce.get().content());
+        assertThat(executeCalled.get(), equalTo(true));
+    }
+
+    private void dispatchRequest(final RestRequest request, final RestChannel channel) {
+        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
+        controller().dispatchRequest(request, channel, threadContext);
+    }
+}
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java
index 47a96bf78dda1..6768583598b2d 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java
@@ -26,6 +26,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender;
 import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs;
 import org.elasticsearch.xpack.inference.external.http.sender.Sender;
+import org.elasticsearch.xpack.inference.external.http.sender.UnifiedChatInput;
 import org.junit.After;
 import org.junit.Before;
@@ -119,6 +120,14 @@ protected void doInfer(
     }
 
+    @Override
+    protected void doUnifiedCompletionInfer(
+        Model model,
+
UnifiedChatInput inputs, + TimeValue timeout, + ActionListener listener + ) {} + @Override protected void doChunkedInfer( Model model, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index 4672bc28b2bf0..c812ca67861fb 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.inference.Model; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; @@ -920,6 +921,68 @@ public void testInfer_SendsRequest() throws IOException { } } + public void testUnifiedCompletionInfer() throws Exception { + // The escapes are because the streaming response must be on a single line + String responseJson = """ + data: {\ + "id":"12345",\ + "object":"chat.completion.chunk",\ + "created":123456789,\ + "model":"gpt-4o-mini",\ + "system_fingerprint": "123456789",\ + "choices":[\ + {\ + "index":0,\ + "delta":{\ + "content":"hello, world"\ + },\ + "logprobs":null,\ + "finish_reason":"stop"\ + }\ + ],\ + "usage":{\ + "prompt_tokens": 16,\ + "completion_tokens": 28,\ + "total_tokens": 44,\ + "prompt_tokens_details": {\ + "cached_tokens": 0,\ + "audio_tokens": 0\ + },\ + "completion_tokens_details": {\ + "reasoning_tokens": 0,\ + "audio_tokens": 0,\ + "accepted_prediction_tokens": 0,\ + "rejected_prediction_tokens": 0\ + }\ + }\ + } + + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + try (var service = new OpenAiService(senderFactory, createWithEmptySettings(threadPool))) { + var model = OpenAiChatCompletionModelTests.createChatCompletionModel(getUrl(webServer), "org", "secret", "model", "user"); + PlainActionFuture listener = new PlainActionFuture<>(); + service.unifiedCompletionInfer( + model, + UnifiedCompletionRequest.of( + List.of( + new UnifiedCompletionRequest.Message(new UnifiedCompletionRequest.ContentString("hello"), "user", null, null, null) + ) + ), + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var result = listener.actionGet(TIMEOUT); + InferenceEventsAssertion.assertThat(result).hasFinishedStream().hasNoErrors().hasEvent(""" + {"id":"12345","choices":[{"delta":{"content":"hello, world"},"finish_reason":"stop","index":0}],""" + """ + "model":"gpt-4o-mini","object":"chat.completion.chunk",""" + """ + "usage":{"completion_tokens":28,"prompt_tokens":16,"total_tokens":44}}"""); + } + } + public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {\ diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModelTests.java index ab1786f0a5843..e7ac4cf879e92 100644 --- 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModelTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionModelTests.java @@ -10,9 +10,11 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnifiedCompletionRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; +import java.util.List; import java.util.Map; import static org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionRequestTaskSettingsTests.getChatCompletionRequestTaskSettingsMap; @@ -42,10 +44,48 @@ public void testOverrideWith_EmptyMap() { public void testOverrideWith_NullMap() { var model = createChatCompletionModel("url", "org", "api_key", "model_name", null); - var overriddenModel = OpenAiChatCompletionModel.of(model, null); + var overriddenModel = OpenAiChatCompletionModel.of(model, (Map) null); assertThat(overriddenModel, sameInstance(model)); } + public void testOverrideWith_UnifiedCompletionRequest_OverridesModelId() { + var model = createChatCompletionModel("url", "org", "api_key", "model_name", "user"); + var request = new UnifiedCompletionRequest( + List.of(new UnifiedCompletionRequest.Message(new UnifiedCompletionRequest.ContentString("hello"), "role", null, null, null)), + "different_model", + null, + null, + null, + null, + null, + null + ); + + assertThat( + OpenAiChatCompletionModel.of(model, request), + is(createChatCompletionModel("url", "org", "api_key", "different_model", "user")) + ); + } + + public void testOverrideWith_UnifiedCompletionRequest_UsesModelFields_WhenRequestDoesNotOverride() { + var model = createChatCompletionModel("url", "org", "api_key", "model_name", "user"); + var request = new UnifiedCompletionRequest( + List.of(new UnifiedCompletionRequest.Message(new UnifiedCompletionRequest.ContentString("hello"), "role", null, null, null)), + null, // not overriding model + null, + null, + null, + null, + null, + null + ); + + assertThat( + OpenAiChatCompletionModel.of(model, request), + is(createChatCompletionModel("url", "org", "api_key", "model_name", "user")) + ); + } + public static OpenAiChatCompletionModel createChatCompletionModel( String url, @Nullable String org, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/telemetry/ApmInferenceStatsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/telemetry/ApmInferenceStatsTests.java deleted file mode 100644 index 1a5aba5f89ad2..0000000000000 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/telemetry/ApmInferenceStatsTests.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.inference.telemetry; - -import org.elasticsearch.inference.Model; -import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.ServiceSettings; -import org.elasticsearch.inference.TaskType; -import org.elasticsearch.telemetry.metric.LongCounter; -import org.elasticsearch.telemetry.metric.MeterRegistry; -import org.elasticsearch.test.ESTestCase; - -import java.util.Map; - -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class ApmInferenceStatsTests extends ESTestCase { - - public void testRecordWithModel() { - var longCounter = mock(LongCounter.class); - - var stats = new ApmInferenceStats(longCounter); - - stats.incrementRequestCount(model("service", TaskType.ANY, "modelId")); - - verify(longCounter).incrementBy( - eq(1L), - eq(Map.of("service", "service", "task_type", TaskType.ANY.toString(), "model_id", "modelId")) - ); - } - - public void testRecordWithoutModel() { - var longCounter = mock(LongCounter.class); - - var stats = new ApmInferenceStats(longCounter); - - stats.incrementRequestCount(model("service", TaskType.ANY, null)); - - verify(longCounter).incrementBy(eq(1L), eq(Map.of("service", "service", "task_type", TaskType.ANY.toString()))); - } - - public void testCreation() { - assertNotNull(ApmInferenceStats.create(MeterRegistry.NOOP)); - } - - private Model model(String service, TaskType taskType, String modelId) { - var configuration = mock(ModelConfigurations.class); - when(configuration.getService()).thenReturn(service); - var settings = mock(ServiceSettings.class); - if (modelId != null) { - when(settings.modelId()).thenReturn(modelId); - } - - var model = mock(Model.class); - when(model.getTaskType()).thenReturn(taskType); - when(model.getConfigurations()).thenReturn(configuration); - when(model.getServiceSettings()).thenReturn(settings); - - return model; - } -} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/telemetry/InferenceStatsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/telemetry/InferenceStatsTests.java new file mode 100644 index 0000000000000..d9327295ba5fa --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/telemetry/InferenceStatsTests.java @@ -0,0 +1,217 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
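For orientation before the new InferenceStatsTests body below: a minimal sketch of the InferenceStats surface these tests exercise. Only the two-argument constructor, requestCount(), inferenceDuration(), create(MeterRegistry), and the static modelAttributes/responseAttributes helpers are grounded in this diff; the metric names and map-building details are assumptions, not the production code, and the UnparsedModel and bare-Throwable overloads exercised later follow the same pattern.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.inference.Model;
import org.elasticsearch.telemetry.metric.LongCounter;
import org.elasticsearch.telemetry.metric.LongHistogram;
import org.elasticsearch.telemetry.metric.MeterRegistry;

// Sketch only: metric names below are invented for illustration.
public record InferenceStats(LongCounter requestCount, LongHistogram inferenceDuration) {

    public static InferenceStats create(MeterRegistry meterRegistry) {
        return new InferenceStats(
            meterRegistry.registerLongCounter("es.inference.requests.count", "inference request count", "operations"),
            meterRegistry.registerLongHistogram("es.inference.requests.time", "inference request duration", "ms")
        );
    }

    public static Map<String, Object> modelAttributes(Model model) {
        var attributes = new HashMap<String, Object>();
        attributes.put("service", model.getConfigurations().getService());
        attributes.put("task_type", model.getTaskType().toString());
        if (model.getServiceSettings().modelId() != null) {
            attributes.put("model_id", model.getServiceSettings().modelId()); // absent when no model id is configured
        }
        return attributes;
    }

    public static Map<String, Object> responseAttributes(Model model, @Nullable Throwable t) {
        var attributes = new HashMap<String, Object>(modelAttributes(model));
        if (t == null) {
            attributes.put("status_code", 200); // success path asserted by the first duration test
        } else if (t instanceof ElasticsearchStatusException ese) {
            // OTel http semconv: when a status exists, error.type is that code rendered as a string
            attributes.put("status_code", ese.status().getStatus());
            attributes.put("error.type", String.valueOf(ese.status().getStatus()));
        } else {
            // no status was ever produced, so error.type falls back to the exception type
            attributes.put("error.type", t.getClass().getSimpleName());
        }
        return attributes;
    }
}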
+ */ + +package org.elasticsearch.xpack.inference.telemetry; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.UnparsedModel; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; + +import static org.elasticsearch.xpack.inference.telemetry.InferenceStats.modelAttributes; +import static org.elasticsearch.xpack.inference.telemetry.InferenceStats.responseAttributes; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.assertArg; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class InferenceStatsTests extends ESTestCase { + + public void testRecordWithModel() { + var longCounter = mock(LongCounter.class); + var stats = new InferenceStats(longCounter, mock()); + + stats.requestCount().incrementBy(1, modelAttributes(model("service", TaskType.ANY, "modelId"))); + + verify(longCounter).incrementBy( + eq(1L), + eq(Map.of("service", "service", "task_type", TaskType.ANY.toString(), "model_id", "modelId")) + ); + } + + public void testRecordWithoutModel() { + var longCounter = mock(LongCounter.class); + var stats = new InferenceStats(longCounter, mock()); + + stats.requestCount().incrementBy(1, modelAttributes(model("service", TaskType.ANY, null))); + + verify(longCounter).incrementBy(eq(1L), eq(Map.of("service", "service", "task_type", TaskType.ANY.toString()))); + } + + public void testCreation() { + assertNotNull(InferenceStats.create(MeterRegistry.NOOP)); + } + + public void testRecordDurationWithoutError() { + var expectedLong = randomLong(); + var histogramCounter = mock(LongHistogram.class); + var stats = new InferenceStats(mock(), histogramCounter); + + stats.inferenceDuration().record(expectedLong, responseAttributes(model("service", TaskType.ANY, "modelId"), null)); + + verify(histogramCounter).record(eq(expectedLong), assertArg(attributes -> { + assertThat(attributes.get("service"), is("service")); + assertThat(attributes.get("task_type"), is(TaskType.ANY.toString())); + assertThat(attributes.get("model_id"), is("modelId")); + assertThat(attributes.get("status_code"), is(200)); + assertThat(attributes.get("error.type"), nullValue()); + })); + } + + /** + * "If response status code was sent or received and status indicates an error according to HTTP span status definition, + * error.type SHOULD be set to the status code number (represented as a string)" + * - https://opentelemetry.io/docs/specs/semconv/http/http-metrics/ + */ + public void testRecordDurationWithElasticsearchStatusException() { + var expectedLong = randomLong(); + var histogramCounter = mock(LongHistogram.class); + var stats = new InferenceStats(mock(), histogramCounter); + var statusCode = RestStatus.BAD_REQUEST; + var exception = new ElasticsearchStatusException("hello", statusCode); + var expectedError = String.valueOf(statusCode.getStatus()); + + stats.inferenceDuration().record(expectedLong, responseAttributes(model("service", TaskType.ANY, 
"modelId"), exception)); + + verify(histogramCounter).record(eq(expectedLong), assertArg(attributes -> { + assertThat(attributes.get("service"), is("service")); + assertThat(attributes.get("task_type"), is(TaskType.ANY.toString())); + assertThat(attributes.get("model_id"), is("modelId")); + assertThat(attributes.get("status_code"), is(statusCode.getStatus())); + assertThat(attributes.get("error.type"), is(expectedError)); + })); + } + + /** + * "If the request fails with an error before response status code was sent or received, + * error.type SHOULD be set to exception type" + * - https://opentelemetry.io/docs/specs/semconv/http/http-metrics/ + */ + public void testRecordDurationWithOtherException() { + var expectedLong = randomLong(); + var histogramCounter = mock(LongHistogram.class); + var stats = new InferenceStats(mock(), histogramCounter); + var exception = new IllegalStateException("ahh"); + var expectedError = exception.getClass().getSimpleName(); + + stats.inferenceDuration().record(expectedLong, responseAttributes(model("service", TaskType.ANY, "modelId"), exception)); + + verify(histogramCounter).record(eq(expectedLong), assertArg(attributes -> { + assertThat(attributes.get("service"), is("service")); + assertThat(attributes.get("task_type"), is(TaskType.ANY.toString())); + assertThat(attributes.get("model_id"), is("modelId")); + assertThat(attributes.get("status_code"), nullValue()); + assertThat(attributes.get("error.type"), is(expectedError)); + })); + } + + public void testRecordDurationWithUnparsedModelAndElasticsearchStatusException() { + var expectedLong = randomLong(); + var histogramCounter = mock(LongHistogram.class); + var stats = new InferenceStats(mock(), histogramCounter); + var statusCode = RestStatus.BAD_REQUEST; + var exception = new ElasticsearchStatusException("hello", statusCode); + var expectedError = String.valueOf(statusCode.getStatus()); + + var unparsedModel = new UnparsedModel("inferenceEntityId", TaskType.ANY, "service", Map.of(), Map.of()); + + stats.inferenceDuration().record(expectedLong, responseAttributes(unparsedModel, exception)); + + verify(histogramCounter).record(eq(expectedLong), assertArg(attributes -> { + assertThat(attributes.get("service"), is("service")); + assertThat(attributes.get("task_type"), is(TaskType.ANY.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(statusCode.getStatus())); + assertThat(attributes.get("error.type"), is(expectedError)); + })); + } + + public void testRecordDurationWithUnparsedModelAndOtherException() { + var expectedLong = randomLong(); + var histogramCounter = mock(LongHistogram.class); + var stats = new InferenceStats(mock(), histogramCounter); + var exception = new IllegalStateException("ahh"); + var expectedError = exception.getClass().getSimpleName(); + + var unparsedModel = new UnparsedModel("inferenceEntityId", TaskType.ANY, "service", Map.of(), Map.of()); + + stats.inferenceDuration().record(expectedLong, responseAttributes(unparsedModel, exception)); + + verify(histogramCounter).record(eq(expectedLong), assertArg(attributes -> { + assertThat(attributes.get("service"), is("service")); + assertThat(attributes.get("task_type"), is(TaskType.ANY.toString())); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), nullValue()); + assertThat(attributes.get("error.type"), is(expectedError)); + })); + } + + public void testRecordDurationWithUnknownModelAndElasticsearchStatusException() { + var 
expectedLong = randomLong(); + var histogramCounter = mock(LongHistogram.class); + var stats = new InferenceStats(mock(), histogramCounter); + var statusCode = RestStatus.BAD_REQUEST; + var exception = new ElasticsearchStatusException("hello", statusCode); + var expectedError = String.valueOf(statusCode.getStatus()); + + stats.inferenceDuration().record(expectedLong, responseAttributes(exception)); + + verify(histogramCounter).record(eq(expectedLong), assertArg(attributes -> { + assertThat(attributes.get("service"), nullValue()); + assertThat(attributes.get("task_type"), nullValue()); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), is(statusCode.getStatus())); + assertThat(attributes.get("error.type"), is(expectedError)); + })); + } + + public void testRecordDurationWithUnknownModelAndOtherException() { + var expectedLong = randomLong(); + var histogramCounter = mock(LongHistogram.class); + var stats = new InferenceStats(mock(), histogramCounter); + var exception = new IllegalStateException("ahh"); + var expectedError = exception.getClass().getSimpleName(); + + stats.inferenceDuration().record(expectedLong, responseAttributes(exception)); + + verify(histogramCounter).record(eq(expectedLong), assertArg(attributes -> { + assertThat(attributes.get("service"), nullValue()); + assertThat(attributes.get("task_type"), nullValue()); + assertThat(attributes.get("model_id"), nullValue()); + assertThat(attributes.get("status_code"), nullValue()); + assertThat(attributes.get("error.type"), is(expectedError)); + })); + } + + private Model model(String service, TaskType taskType, String modelId) { + var configuration = mock(ModelConfigurations.class); + when(configuration.getService()).thenReturn(service); + var settings = mock(ServiceSettings.class); + if (modelId != null) { + when(settings.modelId()).thenReturn(modelId); + } + + var model = mock(Model.class); + when(model.getTaskType()).thenReturn(taskType); + when(model.getConfigurations()).thenReturn(configuration); + when(model.getServiceSettings()).thenReturn(settings); + + return model; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/telemetry/InferenceTimerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/telemetry/InferenceTimerTests.java new file mode 100644 index 0000000000000..72b29d176f8c1 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/telemetry/InferenceTimerTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
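Before the timer test below, the whole contract in one place: a minimal sketch, assuming the record components, since the test only pins down start(Clock), elapsedMillis(), and the two clock.instant() reads.

import java.time.Clock;
import java.time.Duration;
import java.time.Instant;

// Sketch only: component names are assumptions.
public record InferenceTimer(Instant startTime, Clock clock) {

    public static InferenceTimer start(Clock clock) {
        return new InferenceTimer(clock.instant(), clock); // first instant() read in the test
    }

    public long elapsedMillis() {
        return Duration.between(startTime, clock.instant()).toMillis(); // second instant() read
    }
}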
+ */ + +package org.elasticsearch.xpack.inference.telemetry; + +import org.elasticsearch.test.ESTestCase; + +import java.time.Clock; +import java.time.Instant; +import java.time.temporal.ChronoUnit; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class InferenceTimerTests extends ESTestCase { + + public void testElapsedMillis() { + var expectedDuration = randomLongBetween(10, 300); + + var startTime = Instant.now(); + var clock = mock(Clock.class); + when(clock.instant()).thenReturn(startTime).thenReturn(startTime.plus(expectedDuration, ChronoUnit.MILLIS)); + var timer = InferenceTimer.start(clock); + + assertThat(expectedDuration, is(timer.elapsedMillis())); + } +} diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml index c2704a4c22914..3d3790d879ef1 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml @@ -101,7 +101,7 @@ setup: index: test-sparse-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -132,7 +132,7 @@ setup: index: test-sparse-index id: doc_1 body: - inference_field: [40, 49.678] + inference_field: [ 40, 49.678 ] refresh: true - do: @@ -229,7 +229,7 @@ setup: index: test-dense-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -260,7 +260,7 @@ setup: index: test-dense-index id: doc_1 body: - inference_field: [45.1, 100] + inference_field: [ 45.1, 100 ] refresh: true - do: @@ -387,7 +387,7 @@ setup: index: test-dense-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -418,7 +418,7 @@ setup: index: test-sparse-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -440,7 +440,7 @@ setup: - match: { hits.hits.0._id: "doc_1" } - close_to: { hits.hits.0._score: { value: 3.783733e19, error: 1e13 } } - length: { hits.hits.0._source.inference_field.inference.chunks: 2 } - - match: { hits.hits.0.matched_queries: ["i-like-naming-my-queries"] } + - match: { hits.hits.0.matched_queries: [ "i-like-naming-my-queries" ] } --- "Query an index alias": @@ -452,7 +452,7 @@ setup: index: test-sparse-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -503,6 +503,48 @@ setup: - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.root_cause.0.reason: "Field [non_inference_field] of type [text] does not support semantic queries" } +--- +"Query the wrong field type with lenient: true": + - requires: + cluster_features: 
"search.semantic_match_query_rewrite_interception_supported" + reason: lenient introduced in 8.18.0 + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: "inference test" + non_inference_field: "non inference test" + refresh: true + + - do: + catch: bad_request + search: + index: test-sparse-index + body: + query: + semantic: + field: "non_inference_field" + query: "inference test" + + - match: { error.type: "search_phase_execution_exception" } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Field [non_inference_field] of type [text] does not support semantic queries" } + + - do: + search: + index: test-sparse-index + body: + query: + semantic: + field: "non_inference_field" + query: "inference test" + lenient: true + + - match: { hits.total.value: 0 } + + --- "Query a missing field": - do: @@ -783,7 +825,7 @@ setup: index: test-dense-index id: doc_1 body: - inference_field: ["inference test", "another inference test"] + inference_field: [ "inference test", "another inference test" ] non_inference_field: "non inference test" refresh: true @@ -844,11 +886,11 @@ setup: "Query a field that uses the default ELSER 2 endpoint": - requires: reason: "default ELSER 2 inference ID is enabled via a capability" - test_runner_features: [capabilities] + test_runner_features: [ capabilities ] capabilities: - method: GET path: /_inference - capabilities: [default_elser_2] + capabilities: [ default_elser_2 ] - do: indices.create: diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/45_semantic_text_match.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/45_semantic_text_match.yml new file mode 100644 index 0000000000000..cdbf73d31a272 --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/45_semantic_text_match.yml @@ -0,0 +1,284 @@ +setup: + - requires: + cluster_features: "search.semantic_match_query_rewrite_interception_supported" + reason: semantic_text match support introduced in 8.18.0 + + - do: + inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id-2 + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + inference.put: + task_type: text_embedding + inference_id: dense-inference-id + body: > + { + "service": "text_embedding_test_service", + "service_settings": { + "model": "my_model", + "dimensions": 10, + "api_key": "abc64", + "similarity": "COSINE" + }, + "task_settings": { + } + } + + - do: + indices.create: + index: test-sparse-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + non_inference_field: + type: text + + - do: + indices.create: + index: test-dense-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: dense-inference-id + non_inference_field: + type: text + + - do: + indices.create: + index: test-text-only-index + body: + mappings: + properties: + inference_field: + type: text + non_inference_field: + type: text + +--- +"Query using a sparse embedding model": + - skip: + features: [ "headers", "close_to" ] 
+ + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + non_inference_field: "non inference test" + refresh: true + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-sparse-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"Query using a dense embedding model": + - skip: + features: [ "headers", "close_to" ] + + - do: + index: + index: test-dense-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + non_inference_field: "non inference test" + refresh: true + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-dense-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"Query an index alias": + - skip: + features: [ "headers", "close_to" ] + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + non_inference_field: "non inference test" + refresh: true + + - do: + indices.put_alias: + index: test-sparse-index + name: my-alias + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: my-alias + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"Query indices with both semantic_text and regular text content": + + - do: + index: + index: test-sparse-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + non_inference_field: "non inference test" + refresh: true + + - do: + index: + index: test-text-only-index + id: doc_2 + body: + inference_field: [ "inference test", "not an inference field" ] + non_inference_field: "non inference test" + refresh: true + + - do: + search: + index: + - test-sparse-index + - test-text-only-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "doc_1" } + - match: { hits.hits.1._id: "doc_2" } + + # Test querying multiple indices that either use the same inference ID or combine semantic_text with lexical search + - do: + indices.create: + index: test-sparse-index-2 + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + non_inference_field: + type: text + + - do: + index: + index: test-sparse-index-2 + id: doc_3 + body: + inference_field: "another inference test" + refresh: true + + - do: + search: + index: + - test-sparse-index* + - test-text-only-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: "doc_1" } + - match: { hits.hits.1._id: "doc_3" } + - match: { hits.hits.2._id: "doc_2" } + +--- +"Query a field that has no indexed inference results": + - skip: + features: [ "headers" ] + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + 
Content-Type: application/json + search: + index: test-sparse-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 0 } + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-dense-index + body: + query: + match: + inference_field: + query: "inference test" + + - match: { hits.total.value: 0 } diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java index 7f2243ed76849..6e24e644cb2af 100644 --- a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java @@ -117,6 +117,23 @@ public void testAlreadyUpToDateDataStream() throws Exception { assertThat(status.totalIndices(), equalTo(backingIndexCount)); assertThat(status.totalIndicesToBeUpgraded(), equalTo(0)); }); + AcknowledgedResponse cancelResponse = client().execute( + CancelReindexDataStreamAction.INSTANCE, + new CancelReindexDataStreamAction.Request(dataStreamName) + ).actionGet(); + assertNotNull(cancelResponse); + assertThrows( + ResourceNotFoundException.class, + () -> client().execute(CancelReindexDataStreamAction.INSTANCE, new CancelReindexDataStreamAction.Request(dataStreamName)) + .actionGet() + ); + assertThrows( + ResourceNotFoundException.class, + () -> client().execute( + new ActionType(GetMigrationReindexStatusAction.NAME), + new GetMigrationReindexStatusAction.Request(dataStreamName) + ).actionGet() + ); } private int createDataStream(String dataStreamName) { diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexIT.java new file mode 100644 index 0000000000000..e492f035da866 --- /dev/null +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexIT.java @@ -0,0 +1,408 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
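Ahead of the integration test that follows, a hedged usage sketch of the action contract it exercises end to end: one concrete source index in, the generated destination index name out. The index name here is hypothetical, and the exact naming scheme of generateDestIndexName is not shown in this diff.

// Sketch of the request/response round trip the assertions below rely on.
var sourceIndex = "my-backing-index-000001"; // hypothetical
var response = client().execute(
    ReindexDataStreamIndexAction.INSTANCE,
    new ReindexDataStreamIndexAction.Request(sourceIndex)
).actionGet();

// The transport action reports where it reindexed the data; the tests compare this
// against ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex).
String destIndex = response.getDestIndex();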
+ */
+
+package org.elasticsearch.xpack.migrate.action;
+
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.MappingMetadata;
+import org.elasticsearch.cluster.metadata.Template;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.time.DateFormatter;
+import org.elasticsearch.common.time.FormatNames;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.datastreams.DataStreamsPlugin;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.mapper.DateFieldMapper;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.reindex.ReindexPlugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xpack.migrate.MigratePlugin;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.REINDEX_DATA_STREAM_FEATURE_FLAG;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ReindexDatastreamIndexIT extends ESIntegTestCase {
+
+    private static final String MAPPING = """
+        {
+          "_doc":{
+            "dynamic":"strict",
+            "properties":{
+              "foo1":{
+                "type":"text"
+              }
+            }
+          }
+        }
+        """;
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(MigratePlugin.class, ReindexPlugin.class, MockTransportService.TestPlugin.class, DataStreamsPlugin.class);
+    }
+
+    public void testDestIndexDeletedIfExists() throws Exception {
+        assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled());
+
+        // empty source index
+        var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT);
+        indicesAdmin().create(new CreateIndexRequest(sourceIndex)).get();
+
+        // dest index with docs
+        var destIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex);
+        indicesAdmin().create(new CreateIndexRequest(destIndex)).actionGet();
+
indexDocs(destIndex, 10); + assertHitCount(prepareSearch(destIndex).setSize(0), 10); + + // call reindex + client().execute(ReindexDataStreamIndexAction.INSTANCE, new ReindexDataStreamIndexAction.Request(sourceIndex)).actionGet(); + + // verify that dest still exists, but is now empty + assertTrue(indexExists(destIndex)); + assertHitCount(prepareSearch(destIndex).setSize(0), 0); + } + + public void testDestIndexNameSet() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + indicesAdmin().create(new CreateIndexRequest(sourceIndex)).get(); + + // call reindex + var response = client().execute(ReindexDataStreamIndexAction.INSTANCE, new ReindexDataStreamIndexAction.Request(sourceIndex)) + .actionGet(); + + var expectedDestIndexName = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); + assertEquals(expectedDestIndexName, response.getDestIndex()); + } + + public void testDestIndexContainsDocs() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + // source index with docs + var numDocs = randomIntBetween(1, 100); + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + indicesAdmin().create(new CreateIndexRequest(sourceIndex)).get(); + indexDocs(sourceIndex, numDocs); + + // call reindex + var response = client().execute(ReindexDataStreamIndexAction.INSTANCE, new ReindexDataStreamIndexAction.Request(sourceIndex)) + .actionGet(); + indicesAdmin().refresh(new RefreshRequest(response.getDestIndex())).actionGet(); + + // verify that dest contains docs + assertHitCount(prepareSearch(response.getDestIndex()).setSize(0), numDocs); + } + + public void testSetSourceToReadOnly() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + // empty source index + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + indicesAdmin().create(new CreateIndexRequest(sourceIndex)).get(); + + // call reindex + client().execute(ReindexDataStreamIndexAction.INSTANCE, new ReindexDataStreamIndexAction.Request(sourceIndex)).actionGet(); + + // assert that write to source fails + var indexReq = new IndexRequest(sourceIndex).source(jsonBuilder().startObject().field("field", "1").endObject()); + assertThrows(ClusterBlockException.class, () -> client().index(indexReq).actionGet()); + assertHitCount(prepareSearch(sourceIndex).setSize(0), 0); + } + + public void testSettingsAddedBeforeReindex() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + // start with a static setting + var numShards = randomIntBetween(1, 10); + var staticSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards).build(); + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + indicesAdmin().create(new CreateIndexRequest(sourceIndex, staticSettings)).get(); + + // update with a dynamic setting + var numReplicas = randomIntBetween(0, 10); + var dynamicSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas).build(); + indicesAdmin().updateSettings(new UpdateSettingsRequest(dynamicSettings, sourceIndex)).actionGet(); + + // call reindex + var destIndex = client().execute(ReindexDataStreamIndexAction.INSTANCE, new 
ReindexDataStreamIndexAction.Request(sourceIndex)) + .actionGet() + .getDestIndex(); + + // assert both static and dynamic settings set on dest index + var settingsResponse = indicesAdmin().getSettings(new GetSettingsRequest().indices(destIndex)).actionGet(); + assertEquals(numReplicas, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_REPLICAS))); + assertEquals(numShards, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_SHARDS))); + } + + public void testMappingsAddedToDestIndex() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + String mapping = """ + { + "_doc":{ + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"text" + } + } + } + } + """; + indicesAdmin().create(new CreateIndexRequest(sourceIndex).mapping(mapping)).actionGet(); + + // call reindex + var destIndex = client().execute(ReindexDataStreamIndexAction.INSTANCE, new ReindexDataStreamIndexAction.Request(sourceIndex)) + .actionGet() + .getDestIndex(); + + var mappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(sourceIndex, destIndex)).actionGet(); + Map mappings = mappingsResponse.mappings(); + var destMappings = mappings.get(destIndex).sourceAsMap(); + var sourceMappings = mappings.get(sourceIndex).sourceAsMap(); + + assertEquals(sourceMappings, destMappings); + // sanity check specific value from dest mapping + assertEquals("text", XContentMapValues.extractValue("properties.foo1.type", destMappings)); + } + + public void testReadOnlyAddedBack() { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + // Create source index with read-only and/or block-writes + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + boolean isReadOnly = randomBoolean(); + boolean isBlockWrites = randomBoolean(); + var settings = Settings.builder() + .put(IndexMetadata.SETTING_READ_ONLY, isReadOnly) + .put(IndexMetadata.SETTING_BLOCKS_WRITE, isBlockWrites) + .build(); + indicesAdmin().create(new CreateIndexRequest(sourceIndex, settings)).actionGet(); + + // call reindex + var destIndex = client().execute(ReindexDataStreamIndexAction.INSTANCE, new ReindexDataStreamIndexAction.Request(sourceIndex)) + .actionGet() + .getDestIndex(); + + // assert read-only settings added back to dest index + var settingsResponse = indicesAdmin().getSettings(new GetSettingsRequest().indices(destIndex)).actionGet(); + assertEquals(isReadOnly, Boolean.parseBoolean(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_READ_ONLY))); + assertEquals(isBlockWrites, Boolean.parseBoolean(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_BLOCKS_WRITE))); + + removeReadOnly(sourceIndex); + removeReadOnly(destIndex); + } + + public void testSettingsAndMappingsFromTemplate() throws IOException { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var numShards = randomIntBetween(1, 10); + var numReplicas = randomIntBetween(0, 10); + + var settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build(); + + // Create template with settings and mappings + var template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) + .template(new Template(settings, 
CompressedXContent.fromJSON(MAPPING), null)) + .build(); + var request = new TransportPutComposableIndexTemplateAction.Request("logs-template"); + request.indexTemplate(template); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); + + var sourceIndex = "logs-" + randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + indicesAdmin().create(new CreateIndexRequest(sourceIndex)).actionGet(); + + // call reindex + var destIndex = client().execute(ReindexDataStreamIndexAction.INSTANCE, new ReindexDataStreamIndexAction.Request(sourceIndex)) + .actionGet() + .getDestIndex(); + + // verify settings from templates copied to dest index + { + var settingsResponse = indicesAdmin().getSettings(new GetSettingsRequest().indices(destIndex)).actionGet(); + assertEquals(numReplicas, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_REPLICAS))); + assertEquals(numShards, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_SHARDS))); + } + + // verify mappings from templates copied to dest index + { + var mappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(sourceIndex, destIndex)).actionGet(); + var destMappings = mappingsResponse.mappings().get(destIndex).sourceAsMap(); + var sourceMappings = mappingsResponse.mappings().get(sourceIndex).sourceAsMap(); + assertEquals(sourceMappings, destMappings); + // sanity check specific value from dest mapping + assertEquals("text", XContentMapValues.extractValue("properties.foo1.type", destMappings)); + } + } + + private static final String TSDB_MAPPING = """ + { + "_doc":{ + "properties": { + "@timestamp" : { + "type": "date" + }, + "metricset": { + "type": "keyword", + "time_series_dimension": true + } + } + } + }"""; + + private static final String TSDB_DOC = """ + { + "@timestamp": "$time", + "metricset": "pod", + "k8s": { + "pod": { + "name": "dog", + "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", + "ip": "10.10.55.3", + "network": { + "tx": 1434595272, + "rx": 530605511 + } + } + } + } + """; + + public void testTsdbStartEndSet() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var templateSettings = Settings.builder().put("index.mode", "time_series"); + if (randomBoolean()) { + templateSettings.put("index.routing_path", "metricset"); + } + var mapping = new CompressedXContent(TSDB_MAPPING); + + // create template + var request = new TransportPutComposableIndexTemplateAction.Request("id"); + request.indexTemplate( + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template(new Template(templateSettings.build(), mapping, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() + ); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); + + // index doc + Instant time = Instant.now(); + String backingIndexName; + { + var indexRequest = new IndexRequest("k8s").opType(DocWriteRequest.OpType.CREATE); + indexRequest.source(TSDB_DOC.replace("$time", formatInstant(time)), XContentType.JSON); + var indexResponse = client().index(indexRequest).actionGet(); + backingIndexName = indexResponse.getIndex(); + } + + var sourceSettings = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndexName)) + .actionGet() + .getSettings() + .get(backingIndexName); + Instant startTime = IndexSettings.TIME_SERIES_START_TIME.get(sourceSettings); + Instant endTime = 
IndexSettings.TIME_SERIES_END_TIME.get(sourceSettings); + + // sanity check start/end time on source + assertNotNull(startTime); + assertNotNull(endTime); + assertTrue(endTime.isAfter(startTime)); + + // force a rollover so can call reindex and delete + var rolloverRequest = new RolloverRequest("k8s", null); + var rolloverResponse = indicesAdmin().rolloverIndex(rolloverRequest).actionGet(); + rolloverResponse.getNewIndex(); + + // call reindex on the original backing index + var destIndex = client().execute(ReindexDataStreamIndexAction.INSTANCE, new ReindexDataStreamIndexAction.Request(backingIndexName)) + .actionGet() + .getDestIndex(); + + var destSettings = indicesAdmin().getIndex(new GetIndexRequest().indices(destIndex)).actionGet().getSettings().get(destIndex); + var destStart = IndexSettings.TIME_SERIES_START_TIME.get(destSettings); + var destEnd = IndexSettings.TIME_SERIES_END_TIME.get(destSettings); + + assertEquals(startTime, destStart); + assertEquals(endTime, destEnd); + } + + // TODO more logsdb/tsdb specific tests + // TODO more data stream specific tests (how are data streams indices are different from regular indices?) + // TODO check other IndexMetadata fields that need to be fixed after the fact + // TODO what happens if don't have necessary perms for a given index? + + private static void removeReadOnly(String index) { + var settings = Settings.builder() + .put(IndexMetadata.SETTING_READ_ONLY, false) + .put(IndexMetadata.SETTING_BLOCKS_WRITE, false) + .build(); + assertAcked(indicesAdmin().updateSettings(new UpdateSettingsRequest(settings, index)).actionGet()); + } + + private static void indexDocs(String index, int numDocs) { + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < numDocs; i++) { + String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); + bulkRequest.add( + new IndexRequest(index).opType(DocWriteRequest.OpType.CREATE) + .id(i + "") + .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON) + ); + } + BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); + assertThat(bulkResponse.getItems().length, equalTo(numDocs)); + indicesAdmin().refresh(new RefreshRequest(index)).actionGet(); + } + + private static String formatInstant(Instant instant) { + return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant); + } + + private static String getIndexUUID(String index) { + return indicesAdmin().getIndex(new GetIndexRequest().indices(index)) + .actionGet() + .getSettings() + .get(index) + .get(IndexMetadata.SETTING_INDEX_UUID); + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java index 1af66a2c61d56..f42d05727b9fd 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java @@ -32,10 +32,15 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xpack.migrate.action.CancelReindexDataStreamAction; +import org.elasticsearch.xpack.migrate.action.CancelReindexDataStreamTransportAction; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction; import 
org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusTransportAction;
 import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction;
+import org.elasticsearch.xpack.migrate.action.ReindexDataStreamIndexAction;
+import org.elasticsearch.xpack.migrate.action.ReindexDataStreamIndexTransportAction;
 import org.elasticsearch.xpack.migrate.action.ReindexDataStreamTransportAction;
+import org.elasticsearch.xpack.migrate.rest.RestCancelReindexDataStreamAction;
 import org.elasticsearch.xpack.migrate.rest.RestGetMigrationReindexStatusAction;
 import org.elasticsearch.xpack.migrate.rest.RestMigrationReindexAction;
 import org.elasticsearch.xpack.migrate.task.ReindexDataStreamPersistentTaskExecutor;
@@ -69,6 +74,7 @@ public List<RestHandler> getRestHandlers(
         if (REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()) {
             handlers.add(new RestMigrationReindexAction());
             handlers.add(new RestGetMigrationReindexStatusAction());
+            handlers.add(new RestCancelReindexDataStreamAction());
         }
         return handlers;
     }
@@ -79,6 +85,8 @@ public List<RestHandler> getRestHandlers(
         if (REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()) {
             actions.add(new ActionHandler<>(ReindexDataStreamAction.INSTANCE, ReindexDataStreamTransportAction.class));
             actions.add(new ActionHandler<>(GetMigrationReindexStatusAction.INSTANCE, GetMigrationReindexStatusTransportAction.class));
+            actions.add(new ActionHandler<>(CancelReindexDataStreamAction.INSTANCE, CancelReindexDataStreamTransportAction.class));
+            actions.add(new ActionHandler<>(ReindexDataStreamIndexAction.INSTANCE, ReindexDataStreamIndexTransportAction.class));
         }
         return actions;
     }
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamAction.java
new file mode 100644
index 0000000000000..635d8b8f30978
--- /dev/null
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamAction.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
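A hedged usage sketch of the cancel action declared below; the data stream name is hypothetical. The request resolves to exactly one concrete name, since its indicesOptions() is strictSingleIndexNoExpandForbidClosed, so wildcards are rejected.

// One-shot cancel; a second call for the same name fails with
// ResourceNotFoundException once the persistent task is gone, as the
// ReindexDataStreamTransportActionIT earlier in this diff demonstrates.
AcknowledgedResponse resp = client().execute(
    CancelReindexDataStreamAction.INSTANCE,
    new CancelReindexDataStreamAction.Request("my-data-stream") // hypothetical name
).actionGet();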
+
+package org.elasticsearch.xpack.migrate.action;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.Objects;
+
+public class CancelReindexDataStreamAction extends ActionType<AcknowledgedResponse> {
+
+    public static final CancelReindexDataStreamAction INSTANCE = new CancelReindexDataStreamAction();
+    public static final String NAME = "indices:admin/data_stream/reindex_cancel";
+
+    public CancelReindexDataStreamAction() {
+        super(NAME);
+    }
+
+    public static class Request extends ActionRequest implements IndicesRequest {
+        private final String index;
+
+        public Request(String index) {
+            super();
+            this.index = index;
+        }
+
+        public Request(StreamInput in) throws IOException {
+            super(in);
+            this.index = in.readString();
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeString(index);
+        }
+
+        @Override
+        public ActionRequestValidationException validate() {
+            return null;
+        }
+
+        @Override
+        public boolean getShouldStoreResult() {
+            return true;
+        }
+
+        public String getIndex() {
+            return index;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hashCode(index);
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            return other instanceof Request && index.equals(((Request) other).index);
+        }
+
+        public Request nodeRequest(String thisNodeId, long thisTaskId) {
+            Request copy = new Request(index);
+            copy.setParentTask(thisNodeId, thisTaskId);
+            return copy;
+        }
+
+        @Override
+        public String[] indices() {
+            return new String[] { index };
+        }
+
+        @Override
+        public IndicesOptions indicesOptions() {
+            return IndicesOptions.strictSingleIndexNoExpandForbidClosed();
+        }
+    }
+}
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamTransportAction.java
new file mode 100644
index 0000000000000..00a846bf7eb9a
--- /dev/null
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamTransportAction.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
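The transport action that follows boils down to one persistent-task removal. The same calls, condensed with ActionListener.wrap as a reading aid; TASK_ID_PREFIX's literal value is defined in ReindexDataStreamAction and not visible in this diff.

// Derive the persistent task id from the data stream name, then remove the task;
// listener failures (e.g. ResourceNotFoundException for an unknown id) propagate
// straight to the caller.
String persistentTaskId = ReindexDataStreamAction.TASK_ID_PREFIX + request.getIndex();
persistentTasksService.sendRemoveRequest(
    persistentTaskId,
    TimeValue.MAX_VALUE,
    ActionListener.wrap(task -> listener.onResponse(AcknowledgedResponse.TRUE), listener::onFailure)
);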
+ */ + +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.migrate.action.CancelReindexDataStreamAction.Request; + +public class CancelReindexDataStreamTransportAction extends HandledTransportAction<Request, AcknowledgedResponse> { + private final PersistentTasksService persistentTasksService; + + @Inject + public CancelReindexDataStreamTransportAction( + TransportService transportService, + ActionFilters actionFilters, + PersistentTasksService persistentTasksService + ) { + super(CancelReindexDataStreamAction.NAME, transportService, actionFilters, Request::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this.persistentTasksService = persistentTasksService; + } + + @Override + protected void doExecute(Task task, Request request, ActionListener<AcknowledgedResponse> listener) { + String index = request.getIndex(); + String persistentTaskId = ReindexDataStreamAction.TASK_ID_PREFIX + index; + /* + * This removes the persistent task from the cluster state and results in the running task being cancelled (but not removed from + * the task manager). The running task is removed from the task manager in ReindexDataStreamTask::onCancelled, which is called as + * a result of this.
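+ * As an illustration with a hypothetical index name: the REST handler added in this change (RestCancelReindexDataStreamAction) + * routes POST /_migration/reindex/my-index/_cancel to this action, so the persistent task id removed below would be + * TASK_ID_PREFIX + "my-index".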
+ */ + persistentTasksService.sendRemoveRequest(persistentTaskId, TimeValue.MAX_VALUE, new ActionListener<>() { + @Override + public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + listener.onResponse(AcknowledgedResponse.TRUE); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java index f2a6e33f7cb05..ca81a03fc5630 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java @@ -88,7 +88,7 @@ void getRunningTaskFromNode(String persistentTaskId, ActionListener li listener.onFailure( new ResourceNotFoundException( Strings.format( - "Persistent task [{}] is supposed to be running on node [{}], " + "but the task is not found on that node", + "Persistent task [%s] is supposed to be running on node [%s], but the task is not found on that node", persistentTaskId, clusterService.localNode().getId() ) @@ -106,7 +106,7 @@ private void runOnNodeWithTaskIfPossible(Task thisTask, Request request, String listener.onFailure( new ResourceNotFoundException( Strings.format( - "Persistent task [{}] is supposed to be running on node [{}], but that node is not part of the cluster", + "Persistent task [%s] is supposed to be running on node [%s], but that node is not part of the cluster", request.getIndex(), nodeId ) diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java index 9e4cbb1082215..fcb1037419b17 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java @@ -13,10 +13,14 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; @@ -39,10 +43,24 @@ public class ReindexDataStreamAction extends ActionType getOldIndexVersionPredicate(Metadata metadata) { + return index -> metadata.index(index).getCreationVersion().onOrBefore(MINIMUM_WRITEABLE_VERSION_AFTER_UPGRADE); + } + public enum Mode { UPGRADE } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java new file mode 100644 index 0000000000000..00c81fdc9fbc6 --- /dev/null +++ 
b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +public class ReindexDataStreamIndexAction extends ActionType { + + public static final String NAME = "indices:admin/data_stream/index/reindex"; + + public static final ActionType INSTANCE = new ReindexDataStreamIndexAction(); + + private ReindexDataStreamIndexAction() { + super(NAME); + } + + public static class Request extends ActionRequest implements IndicesRequest { + + private final String sourceIndex; + + public Request(String sourceIndex) { + this.sourceIndex = sourceIndex; + } + + public Request(StreamInput in) throws IOException { + super(in); + this.sourceIndex = in.readString(); + } + + public static Request readFrom(StreamInput in) throws IOException { + return new Request(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(sourceIndex); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public String getSourceIndex() { + return sourceIndex; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(sourceIndex, request.sourceIndex); + } + + @Override + public int hashCode() { + return Objects.hash(sourceIndex); + } + + @Override + public String[] indices() { + return new String[] { sourceIndex }; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + } + + public static class Response extends ActionResponse { + private final String destIndex; + + public Response(String destIndex) { + this.destIndex = destIndex; + } + + public Response(StreamInput in) throws IOException { + super(in); + this.destIndex = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(destIndex); + } + + public String getDestIndex() { + return destIndex; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(destIndex, response.destIndex); + } + + @Override + public int hashCode() { + return Objects.hash(destIndex); + } + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java new file mode 100644 index 0000000000000..8863c45691c92 --- /dev/null +++ 
b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.migrate.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +public class ReindexDataStreamIndexTransportAction extends HandledTransportAction< + ReindexDataStreamIndexAction.Request, + ReindexDataStreamIndexAction.Response> { + + private static final Logger logger = LogManager.getLogger(ReindexDataStreamIndexTransportAction.class); + + private static final Set SETTINGS_TO_ADD_BACK = Set.of(IndexMetadata.SETTING_BLOCKS_WRITE, IndexMetadata.SETTING_READ_ONLY); + + private static final IndicesOptions IGNORE_MISSING_OPTIONS = IndicesOptions.fromOptions(true, true, false, false); + private final ClusterService clusterService; + private final Client client; + private final IndexScopedSettings indexScopedSettings; + + @Inject + public ReindexDataStreamIndexTransportAction( + TransportService transportService, + ClusterService clusterService, + ActionFilters actionFilters, + Client client, + IndexScopedSettings indexScopedSettings + ) { + super( + ReindexDataStreamIndexAction.NAME, + false, + transportService, + actionFilters, + ReindexDataStreamIndexAction.Request::new, + transportService.getThreadPool().executor(ThreadPool.Names.GENERIC) + ); + this.clusterService = clusterService; + this.client = client; + this.indexScopedSettings = indexScopedSettings; + } + + @Override + protected void doExecute( + Task task, + ReindexDataStreamIndexAction.Request request, + ActionListener listener + ) { + var sourceIndexName = request.getSourceIndex(); + var 
destIndexName = generateDestIndexName(sourceIndexName); + IndexMetadata sourceIndex = clusterService.state().getMetadata().index(sourceIndexName); + Settings settingsBefore = sourceIndex.getSettings(); + + var hasOldVersion = ReindexDataStreamAction.getOldIndexVersionPredicate(clusterService.state().metadata()); + if (hasOldVersion.test(sourceIndex.getIndex()) == false) { + logger.warn( + "Migrating index [{}] with version [{}] is unnecessary as its version is not before [{}]", + sourceIndexName, + sourceIndex.getCreationVersion(), + ReindexDataStreamAction.MINIMUM_WRITEABLE_VERSION_AFTER_UPGRADE + ); + } + + SubscribableListener.newForked(l -> setBlockWrites(sourceIndexName, l)) + .andThen(l -> deleteDestIfExists(destIndexName, l)) + .andThen(l -> createIndex(sourceIndex, destIndexName, l)) + .andThen(l -> reindex(sourceIndexName, destIndexName, l)) + .andThen(l -> updateSettings(settingsBefore, destIndexName, l)) + .andThenApply(ignored -> new ReindexDataStreamIndexAction.Response(destIndexName)) + .addListener(listener); + } + + private void setBlockWrites(String sourceIndexName, ActionListener listener) { + logger.debug("Setting write block on source index [{}]", sourceIndexName); + final Settings readOnlySettings = Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build(); + var updateSettingsRequest = new UpdateSettingsRequest(readOnlySettings, sourceIndexName); + client.admin().indices().updateSettings(updateSettingsRequest, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse response) { + if (response.isAcknowledged()) { + listener.onResponse(null); + } else { + var errorMessage = String.format(Locale.ROOT, "Could not set read-only on source index [%s]", sourceIndexName); + listener.onFailure(new ElasticsearchException(errorMessage)); + } + } + + @Override + public void onFailure(Exception e) { + if (e instanceof ClusterBlockException || e.getCause() instanceof ClusterBlockException) { + // It's fine if read-only is already set + listener.onResponse(null); + } else { + listener.onFailure(e); + } + } + }); + } + + private void deleteDestIfExists(String destIndexName, ActionListener listener) { + logger.debug("Attempting to delete index [{}]", destIndexName); + var deleteIndexRequest = new DeleteIndexRequest(destIndexName).indicesOptions(IGNORE_MISSING_OPTIONS) + .masterNodeTimeout(TimeValue.MAX_VALUE); + var errorMessage = String.format(Locale.ROOT, "Failed to acknowledge delete of index [%s]", destIndexName); + client.admin().indices().delete(deleteIndexRequest, failIfNotAcknowledged(listener, errorMessage)); + } + + private void createIndex(IndexMetadata sourceIndex, String destIndexName, ActionListener listener) { + logger.debug("Creating destination index [{}] for source index [{}]", destIndexName, sourceIndex.getIndex().getName()); + + // Create destination with subset of source index settings that can be added before reindex + var settings = getPreSettings(sourceIndex); + + var sourceMapping = sourceIndex.mapping(); + Map mapping = sourceMapping != null ? 
sourceMapping.rawSourceAsMap() : Map.of(); + var createIndexRequest = new CreateIndexRequest(destIndexName).settings(settings).mapping(mapping); + + var errorMessage = String.format(Locale.ROOT, "Could not create index [%s]", destIndexName); + client.admin().indices().create(createIndexRequest, failIfNotAcknowledged(listener, errorMessage)); + } + + private void reindex(String sourceIndexName, String destIndexName, ActionListener<BulkByScrollResponse> listener) { + logger.debug("Reindex to destination index [{}] from source index [{}]", destIndexName, sourceIndexName); + var reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(sourceIndexName); + reindexRequest.getSearchRequest().allowPartialSearchResults(false); + reindexRequest.getSearchRequest().source().fetchSource(true); + reindexRequest.setDestIndex(destIndexName); + client.execute(ReindexAction.INSTANCE, reindexRequest, listener); + } + + private void updateSettings(Settings settingsBefore, String destIndexName, ActionListener<Void> listener) { + logger.debug("Adding settings from source index that could not be added before reindex"); + + Settings postSettings = getPostSettings(settingsBefore); + if (postSettings.isEmpty()) { + listener.onResponse(null); + return; + } + + var updateSettingsRequest = new UpdateSettingsRequest(postSettings, destIndexName); + var errorMessage = String.format(Locale.ROOT, "Could not update settings on index [%s]", destIndexName); + client.admin().indices().updateSettings(updateSettingsRequest, failIfNotAcknowledged(listener, errorMessage)); + } + + // Filter source index settings to subset of settings that can be included during reindex. + // Similar to the settings filtering done when reindexing for upgrade in Kibana + // https://github.com/elastic/kibana/blob/8a8363f02cc990732eb9cbb60cd388643a336bed/x-pack + // /plugins/upgrade_assistant/server/lib/reindexing/index_settings.ts#L155 + private Settings getPreSettings(IndexMetadata sourceIndex) { + // filter settings that will be added back later + var filtered = sourceIndex.getSettings().filter(settingName -> SETTINGS_TO_ADD_BACK.contains(settingName) == false); + + // filter private and non-copyable settings + var builder = MetadataCreateIndexService.copySettingsFromSource(false, filtered, indexScopedSettings, Settings.builder()); + return builder.build(); + } + + private Settings getPostSettings(Settings settingsBefore) { + return settingsBefore.filter(SETTINGS_TO_ADD_BACK::contains); + } + + public static String generateDestIndexName(String sourceIndex) { + return "migrated-" + sourceIndex; + } + + private static <U extends AcknowledgedResponse> ActionListener<U> failIfNotAcknowledged( + ActionListener<Void> listener, + String errorMessage + ) { + return listener.delegateFailureAndWrap((delegate, response) -> { + if (response.isAcknowledged()) { + delegate.onResponse(null); + } else { + throw new ElasticsearchException(errorMessage); + } + }); + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java index 95a078690a055..f011c429ce79c 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTaskParams; import static
org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.TASK_ID_PREFIX; +import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.getOldIndexVersionPredicate; /* * This transport action creates a new persistent task for reindexing the source data stream given in the request. On successful creation @@ -67,10 +68,7 @@ protected void doExecute(Task task, ReindexDataStreamRequest request, ActionList return; } int totalIndices = dataStream.getIndices().size(); - int totalIndicesToBeUpgraded = (int) dataStream.getIndices() - .stream() - .filter(index -> metadata.index(index).getCreationVersion().isLegacyIndexVersion()) - .count(); + int totalIndicesToBeUpgraded = (int) dataStream.getIndices().stream().filter(getOldIndexVersionPredicate(metadata)).count(); ReindexDataStreamTaskParams params = new ReindexDataStreamTaskParams( sourceDataStreamName, transportService.getThreadPool().absoluteTimeInMillis(), diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/rest/RestCancelReindexDataStreamAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/rest/RestCancelReindexDataStreamAction.java new file mode 100644 index 0000000000000..0bd68e8b2df73 --- /dev/null +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/rest/RestCancelReindexDataStreamAction.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.migrate.rest; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.migrate.action.CancelReindexDataStreamAction; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestCancelReindexDataStreamAction extends BaseRestHandler { + + @Override + public String getName() { + return "cancel_reindex_data_stream_action"; + } + + @Override + public List routes() { + return List.of(new Route(POST, "/_migration/reindex/{index}/_cancel")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String index = request.param("index"); + CancelReindexDataStreamAction.Request cancelTaskRequest = new CancelReindexDataStreamAction.Request(index); + return channel -> client.execute(CancelReindexDataStreamAction.INSTANCE, cancelTaskRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java index b9e0c5f738e38..3ffb58f14b666 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java @@ -24,6 +24,8 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.getOldIndexVersionPredicate; + public class 
ReindexDataStreamPersistentTaskExecutor extends PersistentTasksExecutor { private static final TimeValue TASK_KEEP_ALIVE_TIME = TimeValue.timeValueDays(1); private final Client client; @@ -72,7 +74,7 @@ protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTask if (dataStreamInfos.size() == 1) { List indices = dataStreamInfos.get(0).getDataStream().getIndices(); List indicesToBeReindexed = indices.stream() - .filter(index -> clusterService.state().getMetadata().index(index).getCreationVersion().isLegacyIndexVersion()) + .filter(getOldIndexVersionPredicate(clusterService.state().metadata())) .toList(); reindexDataStreamTask.setPendingIndicesCount(indicesToBeReindexed.size()); for (Index index : indicesToBeReindexed) { @@ -89,13 +91,11 @@ protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTask } private void completeSuccessfulPersistentTask(ReindexDataStreamTask persistentTask) { - persistentTask.allReindexesCompleted(); - threadPool.schedule(persistentTask::markAsCompleted, getTimeToLive(persistentTask), threadPool.generic()); + persistentTask.allReindexesCompleted(threadPool, getTimeToLive(persistentTask)); } private void completeFailedPersistentTask(ReindexDataStreamTask persistentTask, Exception e) { - persistentTask.taskFailed(e); - threadPool.schedule(() -> persistentTask.markAsFailed(e), getTimeToLive(persistentTask), threadPool.generic()); + persistentTask.taskFailed(threadPool, getTimeToLive(persistentTask), e); } private TimeValue getTimeToLive(ReindexDataStreamTask reindexDataStreamTask) { diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java index 72ddb87e9dea5..844f24f45ab77 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java @@ -7,9 +7,12 @@ package org.elasticsearch.xpack.migrate.task; +import org.elasticsearch.common.util.concurrent.RunOnce; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.List; @@ -21,12 +24,14 @@ public class ReindexDataStreamTask extends AllocatedPersistentTask { private final long persistentTaskStartTime; private final int totalIndices; private final int totalIndicesToBeUpgraded; - private boolean complete = false; - private Exception exception; - private AtomicInteger inProgress = new AtomicInteger(0); - private AtomicInteger pending = new AtomicInteger(); - private List> errors = new ArrayList<>(); + private volatile boolean complete = false; + private volatile Exception exception; + private final AtomicInteger inProgress = new AtomicInteger(0); + private final AtomicInteger pending = new AtomicInteger(); + private final List> errors = new ArrayList<>(); + private final RunOnce completeTask; + @SuppressWarnings("this-escape") public ReindexDataStreamTask( long persistentTaskStartTime, int totalIndices, @@ -42,6 +47,13 @@ public ReindexDataStreamTask( this.persistentTaskStartTime = persistentTaskStartTime; this.totalIndices = totalIndices; this.totalIndicesToBeUpgraded = totalIndicesToBeUpgraded; + this.completeTask = new RunOnce(() -> { + if (exception == null) { + 
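+ // taskFailed sets exception before this RunOnce can run, so a null exception here means every index was reindexed + // successfully; RunOnce guarantees markAsCompleted/markAsFailed happens at most once even if the scheduled + // time-to-live expiry races with an explicit cancellation (see onCancelled below)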
markAsCompleted(); + } else { + markAsFailed(exception); + } + }); } @Override @@ -58,13 +70,18 @@ public ReindexDataStreamStatus getStatus() { ); } - public void allReindexesCompleted() { + public void allReindexesCompleted(ThreadPool threadPool, TimeValue timeToLive) { this.complete = true; + if (isCancelled()) { + completeTask.run(); + } else { + threadPool.schedule(completeTask, timeToLive, threadPool.generic()); + } } - public void taskFailed(Exception e) { - this.complete = true; + public void taskFailed(ThreadPool threadPool, TimeValue timeToLive, Exception e) { this.exception = e; + allReindexesCompleted(threadPool, timeToLive); } public void reindexSucceeded() { @@ -84,4 +101,16 @@ public void incrementInProgressIndicesCount() { public void setPendingIndicesCount(int size) { pending.set(size); } + + @Override + public void onCancelled() { + /* + * If the task is complete, but just waiting for its scheduled removal, we go ahead and call markAsCompleted/markAsFailed + * immediately. This results in the running task being removed from the task manager. If the task is not complete, then one of + * allReindexesCompleted or taskFailed will be called in the future, resulting in the same thing. + */ + if (complete) { + completeTask.run(); + } + } } diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamRequestTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamRequestTests.java new file mode 100644 index 0000000000000..187561dae19b0 --- /dev/null +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/CancelReindexDataStreamRequestTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.migrate.action.CancelReindexDataStreamAction.Request; + +import java.io.IOException; + +public class CancelReindexDataStreamRequestTests extends AbstractWireSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return Request::new; + } + + @Override + protected Request createTestInstance() { + return new Request(randomAlphaOfLength(30)); + } + + @Override + protected Request mutateInstance(Request instance) throws IOException { + return new Request(instance.getIndex() + randomAlphaOfLength(5)); + } +} diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexRequestTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexRequestTests.java new file mode 100644 index 0000000000000..a057056474ef1 --- /dev/null +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexRequestTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamIndexAction.Request; + +public class ReindexDatastreamIndexRequestTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return Request::new; + } + + @Override + protected Request createTestInstance() { + return new Request(randomAlphaOfLength(20)); + } + + @Override + protected Request mutateInstance(Request instance) { + return new ReindexDataStreamIndexAction.Request(randomValueOtherThan(instance.getSourceIndex(), () -> randomAlphaOfLength(20))); + } +} diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexResponseTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexResponseTests.java new file mode 100644 index 0000000000000..752e173585f0e --- /dev/null +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexResponseTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamIndexAction.Response; + +public class ReindexDatastreamIndexResponseTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return Response::new; + } + + @Override + protected Response createTestInstance() { + return new Response(randomAlphaOfLength(20)); + } + + @Override + protected Response mutateInstance(Response instance) { + return new Response(randomValueOtherThan(instance.getDestIndex(), () -> randomAlphaOfLength(20))); + } +} diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index 4e92cad1026a3..04f349d67d7fe 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -1142,6 +1142,22 @@ public void testDeploymentThreadsIncludedInUsage() throws IOException { } } + public void testInferEmptyInput() throws IOException { + String modelId = "empty_input"; + createPassThroughModel(modelId); + putModelDefinition(modelId); + putVocabulary(List.of("these", "are", "my", "words"), modelId); + startDeployment(modelId); + + Request request = new Request("POST", "/_ml/trained_models/" + modelId + "/_infer?timeout=30s"); + request.setJsonEntity(""" + { "docs": [] } + """); + + var inferenceResponse = client().performRequest(request); + assertThat(EntityUtils.toString(inferenceResponse.getEntity()), equalTo("{\"inference_results\":[]}")); + } + private void putModelDefinition(String modelId) throws IOException { putModelDefinition(modelId, 
BASE_64_ENCODED_MODEL, RAW_MODEL_SIZE); } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java index 08fda90f9fd73..8fe87b043c78b 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java @@ -195,7 +195,7 @@ public void testOpenJobWithOldSnapshot() { assertThat( ex.getMessage(), containsString( - "[open-job-with-old-model-snapshot] job model snapshot [snap_1] has min version before [7.0.0], " + "[open-job-with-old-model-snapshot] job model snapshot [snap_1] has min version before [8.3.0], " + "please revert to a newer model snapshot or reset the job" ) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java index e0405b1749536..20a4ceeae59b3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java @@ -132,6 +132,11 @@ protected void doExecute(Task task, Request request, ActionListener li Response.Builder responseBuilder = Response.builder(); TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); + if (request.numberOfDocuments() == 0) { + listener.onResponse(responseBuilder.setId(request.getId()).build()); + return; + } + if (MachineLearning.INFERENCE_AGG_FEATURE.check(licenseState)) { responseBuilder.setLicensed(true); doInfer(task, request, responseBuilder, parentTaskId, listener); diff --git a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java index d20f0f88aeb16..bdd6d73ec0fbf 100644 --- a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java +++ b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java @@ -54,7 +54,9 @@ public void testRetrieverExtractionErrors() throws IOException { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true) - .rewrite(new QueryRewriteContext(parserConfig(), null, null, null, new PointInTimeBuilder(new BytesArray("pitid")))) + .rewrite( + new QueryRewriteContext(parserConfig(), null, null, null, new PointInTimeBuilder(new BytesArray("pitid")), null) + ) ); assertEquals("[search_after] cannot be used in children of compound retrievers", iae.getMessage()); } @@ -70,7 +72,9 @@ public void testRetrieverExtractionErrors() throws IOException { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true) - .rewrite(new QueryRewriteContext(parserConfig(), null, null, null, new PointInTimeBuilder(new BytesArray("pitid")))) + .rewrite( + new QueryRewriteContext(parserConfig(), null, null, null, new PointInTimeBuilder(new BytesArray("pitid")), null) + ) ); assertEquals("[terminate_after] cannot be used in children of compound retrievers", iae.getMessage()); } diff --git 
a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java index 3ee49cce85a8a..6115bec91ad62 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java @@ -355,6 +355,18 @@ protected void assertExecutorIsIdle(String executorName) throws Exception { }); } + protected static void waitUntilRecoveryIsDone(String index) throws Exception { + assertBusy(() -> { + RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries(index).get(); + assertThat(recoveryResponse.hasRecoveries(), equalTo(true)); + for (List value : recoveryResponse.shardRecoveryStates().values()) { + for (RecoveryState recoveryState : value) { + assertThat(recoveryState.getStage(), equalTo(RecoveryState.Stage.DONE)); + } + } + }); + } + public static class LicensedSnapshotBasedRecoveriesPlugin extends SnapshotBasedRecoveriesPlugin { public LicensedSnapshotBasedRecoveriesPlugin(Settings settings) { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index ff8e233fce8b5..de249f7f07e58 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.searchablesnapshots; -import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; @@ -36,7 +35,6 @@ import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.snapshots.SnapshotId; @@ -1324,18 +1322,6 @@ private static IndexMetadata getIndexMetadata(String indexName) { .index(indexName); } - private static void waitUntilRecoveryIsDone(String index) throws Exception { - assertBusy(() -> { - RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries(index).get(); - assertThat(recoveryResponse.hasRecoveries(), equalTo(true)); - for (List value : recoveryResponse.shardRecoveryStates().values()) { - for (RecoveryState recoveryState : value) { - assertThat(recoveryState.getStage(), equalTo(RecoveryState.Stage.DONE)); - } - } - }); - } - private void waitUntilAllShardsAreUnassigned(Index index) throws Exception { awaitClusterState(state -> 
state.getRoutingTable().index(index).allPrimaryShardsUnassigned()); } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsSearchIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsSearchIntegTests.java new file mode 100644 index 0000000000000..c60ebc884dbed --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsSearchIntegTests.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.searchablesnapshots; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING; +import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.hamcrest.Matchers.equalTo; + +public class SearchableSnapshotsSearchIntegTests extends BaseFrozenSearchableSnapshotsIntegTestCase { + + /** + * Tests basic search functionality with a query sorted by field against partially mounted indices. + * The can match phase is always executed against read-only indices, and for sorted queries it extracts the min and max range from + * each shard. This will happen not only in the can match phase, but optionally also in the query phase. + * See {@link org.elasticsearch.search.internal.ShardSearchRequest#canReturnNullResponseIfMatchNoDocs()}. + * For keyword fields, it is not possible to retrieve min and max from the index reader on frozen, hence we need to make sure that, + * while that extraction fails, the query still goes ahead and does not return shard failures.
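+ * Concretely, the test below mounts two ten-shard indices and asserts that all 20 shards report success, with none skipped and + * none failed, even though the keyword min/max extraction fails on every frozen shard.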
+ */ + public void testKeywordSortedQueryOnFrozen() throws Exception { + internalCluster().startMasterOnlyNode(); + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + final String dataNodeHoldingRegularIndex = internalCluster().startDataOnlyNode(); + String dataNodeHoldingSearchableSnapshot = internalCluster().startDataOnlyNode(); + + String[] indices = new String[] { "index-0001", "index-0002" }; + for (String index : indices) { + Settings extraSettings = Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex) + .build(); + // we use a high number of shards because that's more likely to trigger can match as part of query phase: + // see ShardSearchRequest#canReturnNullResponseIfMatchNoDocs + assertAcked( + indicesAdmin().prepareCreate(index) + .setSettings(indexSettingsNoReplicas(10).put(INDEX_SOFT_DELETES_SETTING.getKey(), true).put(extraSettings)) + ); + } + ensureGreen(indices); + + for (String index : indices) { + final List indexRequestBuilders = new ArrayList<>(); + indexRequestBuilders.add(prepareIndex(index).setSource("keyword", "value1")); + indexRequestBuilders.add(prepareIndex(index).setSource("keyword", "value2")); + indexRandom(true, false, indexRequestBuilders); + assertThat( + indicesAdmin().prepareForceMerge(index).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(), + equalTo(0) + ); + refresh(index); + forceMerge(); + } + + final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createRepository(repositoryName, "mock"); + + final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indices[0])).snapshotId(); + assertAcked(indicesAdmin().prepareDelete(indices[0])); + + // Block the repository for the node holding the searchable snapshot shards + // to delay its restore + blockDataNode(repositoryName, dataNodeHoldingSearchableSnapshot); + + // Force the searchable snapshot to be allocated in a particular node + Settings restoredIndexSettings = Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingSearchableSnapshot) + .build(); + String[] mountedIndices = new String[indices.length]; + for (int i = 0; i < indices.length; i++) { + + String index = indices[i]; + String mountedIndex = index + "-mounted"; + mountedIndices[i] = mountedIndex; + final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest( + TEST_REQUEST_TIMEOUT, + mountedIndex, + repositoryName, + snapshotId.getName(), + indices[0], + restoredIndexSettings, + Strings.EMPTY_ARRAY, + false, + randomFrom(MountSearchableSnapshotRequest.Storage.values()) + ); + client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet(); + } + + // Allow the searchable snapshots to be finally mounted + unblockNode(repositoryName, dataNodeHoldingSearchableSnapshot); + for (String mountedIndex : mountedIndices) { + waitUntilRecoveryIsDone(mountedIndex); + } + ensureGreen(mountedIndices); + + SearchRequest request = new SearchRequest(mountedIndices).searchType(SearchType.QUERY_THEN_FETCH) + .source(SearchSourceBuilder.searchSource().sort("keyword.keyword")) + .allowPartialSearchResults(false); + if (randomBoolean()) { + request.setPreFilterShardSize(100); + } + + assertResponse(client().search(request), searchResponse -> { + assertThat(searchResponse.getSuccessfulShards(), equalTo(20)); + assertThat(searchResponse.getFailedShards(), equalTo(0)); + 
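+ // 20 shards = 2 mounted indices x 10 primaries each; the failed keyword min/max extraction on frozen shards + // must surface as neither skipped nor failed shards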
assertThat(searchResponse.getSkippedShards(), equalTo(0)); + assertThat(searchResponse.getTotalShards(), equalTo(20)); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(4L)); + }); + } +} diff --git a/x-pack/plugin/security/qa/multi-cluster/build.gradle b/x-pack/plugin/security/qa/multi-cluster/build.gradle index 646ecd366639b..78ea22f0c6f1c 100644 --- a/x-pack/plugin/security/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/security/qa/multi-cluster/build.gradle @@ -25,6 +25,7 @@ dependencies { clusterModules project(':x-pack:plugin:enrich') clusterModules project(':x-pack:plugin:autoscaling') clusterModules project(':x-pack:plugin:ml') + clusterModules project(xpackModule('ilm')) clusterModules(project(":modules:ingest-common")) } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityDataStreamEsqlRcs1IT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityDataStreamEsqlRcs1IT.java new file mode 100644 index 0000000000000..57eb583912c49 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityDataStreamEsqlRcs1IT.java @@ -0,0 +1,402 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.remotecluster; + +import org.elasticsearch.Build; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.MapMatcher; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; + +import static org.elasticsearch.test.ListMatcher.matchesList; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; + +// TODO consolidate me with RemoteClusterSecurityDataStreamEsqlRcs2IT +public class RemoteClusterSecurityDataStreamEsqlRcs1IT extends AbstractRemoteClusterSecurityTestCase { + static { + fulfillingCluster = ElasticsearchCluster.local() + .name("fulfilling-cluster") + .module("x-pack-autoscaling") + .module("x-pack-esql") + .module("x-pack-enrich") + .module("x-pack-ml") + .module("x-pack-ilm") + .module("ingest-common") + .apply(commonClusterConfig) + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.authc.token.enabled", "true") + .rolesFile(Resource.fromClasspath("roles.yml")) + .build(); + + queryCluster = ElasticsearchCluster.local() + .name("query-cluster") + .module("x-pack-autoscaling") + .module("x-pack-esql") + .module("x-pack-enrich") + .module("x-pack-ml") + .module("x-pack-ilm") + .module("ingest-common") + .apply(commonClusterConfig) + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.authc.token.enabled", "true") + 
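+ // roles.yml (loaded from the classpath here and on the fulfilling cluster above) presumably defines the + // logs_foo_* roles referenced by the test users registered just below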
.rolesFile(Resource.fromClasspath("roles.yml")) + .user("logs_foo_all", "x-pack-test-password", "logs_foo_all", false) + .user("logs_foo_16_only", "x-pack-test-password", "logs_foo_16_only", false) + .user("logs_foo_after_2021", "x-pack-test-password", "logs_foo_after_2021", false) + .user("logs_foo_after_2021_pattern", "x-pack-test-password", "logs_foo_after_2021_pattern", false) + .user("logs_foo_after_2021_alias", "x-pack-test-password", "logs_foo_after_2021_alias", false) + .build(); + } + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster); + + public void testDataStreamsWithDlsAndFls() throws Exception { + configureRemoteCluster(REMOTE_CLUSTER_ALIAS, fulfillingCluster, true, randomBoolean(), randomBoolean()); + createDataStreamOnFulfillingCluster(); + setupAdditionalUsersAndRoles(); + + doTestDataStreamsWithFlsAndDls(); + } + + private void setupAdditionalUsersAndRoles() throws IOException { + createUserAndRoleOnQueryCluster("fls_user_logs_pattern", "fls_user_logs_pattern", """ + { + "indices": [ + { + "names": ["logs-*"], + "privileges": ["read"], + "field_security": { + "grant": ["@timestamp", "data_stream.namespace"] + } + } + ] + }"""); + createUserAndRoleOnFulfillingCluster("fls_user_logs_pattern", "fls_user_logs_pattern", """ + { + "indices": [ + { + "names": ["logs-*"], + "privileges": ["read"], + "field_security": { + "grant": ["@timestamp", "data_stream.namespace"] + } + } + ] + }"""); + } + + static void createUserAndRoleOnQueryCluster(String username, String roleName, String roleJson) throws IOException { + final var putRoleRequest = new Request("PUT", "/_security/role/" + roleName); + putRoleRequest.setJsonEntity(roleJson); + assertOK(adminClient().performRequest(putRoleRequest)); + + final var putUserRequest = new Request("PUT", "/_security/user/" + username); + putUserRequest.setJsonEntity(Strings.format(""" + { + "password": "%s", + "roles" : ["%s"] + }""", PASS, roleName)); + assertOK(adminClient().performRequest(putUserRequest)); + } + + static void createUserAndRoleOnFulfillingCluster(String username, String roleName, String roleJson) throws IOException { + final var putRoleRequest = new Request("PUT", "/_security/role/" + roleName); + putRoleRequest.setJsonEntity(roleJson); + assertOK(performRequestAgainstFulfillingCluster(putRoleRequest)); + + final var putUserRequest = new Request("PUT", "/_security/user/" + username); + putUserRequest.setJsonEntity(Strings.format(""" + { + "password": "%s", + "roles" : ["%s"] + }""", PASS, roleName)); + assertOK(performRequestAgainstFulfillingCluster(putUserRequest)); + } + + static Response runESQLCommandAgainstQueryCluster(String user, String command) throws IOException { + if (command.toLowerCase(Locale.ROOT).contains("limit") == false) { + // add a (high) limit to avoid warnings on default limit + command += " | limit 10000000"; + } + XContentBuilder json = JsonXContent.contentBuilder(); + json.startObject(); + json.field("query", command); + addRandomPragmas(json); + json.endObject(); + Request request = new Request("POST", "_query"); + request.setJsonEntity(org.elasticsearch.common.Strings.toString(json)); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", user)); + request.addParameter("error_trace", "true"); + Response response = adminClient().performRequest(request); + assertOK(response); + return response; + } + + static void addRandomPragmas(XContentBuilder builder) throws IOException { + if 
(Build.current().isSnapshot()) { + Settings pragmas = randomPragmas(); + if (pragmas != Settings.EMPTY) { + builder.startObject("pragma"); + builder.value(pragmas); + builder.endObject(); + } + } + } + + static Settings randomPragmas() { + Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + settings.put("page_size", between(1, 5)); + } + if (randomBoolean()) { + settings.put("exchange_buffer_size", between(1, 2)); + } + if (randomBoolean()) { + settings.put("data_partitioning", randomFrom("shard", "segment", "doc")); + } + if (randomBoolean()) { + settings.put("enrich_max_workers", between(1, 5)); + } + if (randomBoolean()) { + settings.put("node_level_reduction", randomBoolean()); + } + return settings.build(); + } + + static void createDataStreamOnFulfillingCluster() throws Exception { + createDataStreamPolicy(AbstractRemoteClusterSecurityTestCase::performRequestAgainstFulfillingCluster); + createDataStreamComponentTemplate(AbstractRemoteClusterSecurityTestCase::performRequestAgainstFulfillingCluster); + createDataStreamIndexTemplate(AbstractRemoteClusterSecurityTestCase::performRequestAgainstFulfillingCluster); + createDataStreamDocuments(AbstractRemoteClusterSecurityTestCase::performRequestAgainstFulfillingCluster); + createDataStreamAlias(AbstractRemoteClusterSecurityTestCase::performRequestAgainstFulfillingCluster); + } + + private static void createDataStreamPolicy(CheckedFunction requestConsumer) throws Exception { + Request request = new Request("PUT", "_ilm/policy/my-lifecycle-policy"); + request.setJsonEntity(""" + { + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_primary_shard_size": "50gb" + } + } + }, + "delete": { + "min_age": "735d", + "actions": { + "delete": {} + } + } + } + } + }"""); + + requestConsumer.apply(request); + } + + private static void createDataStreamComponentTemplate(CheckedFunction requestConsumer) throws Exception { + Request request = new Request("PUT", "_component_template/my-template"); + request.setJsonEntity(""" + { + "template": { + "settings": { + "index.lifecycle.name": "my-lifecycle-policy" + }, + "mappings": { + "properties": { + "@timestamp": { + "type": "date", + "format": "date_optional_time||epoch_millis" + }, + "data_stream": { + "properties": { + "namespace": {"type": "keyword"}, + "environment": {"type": "keyword"} + } + } + } + } + } + }"""); + requestConsumer.apply(request); + } + + private static void createDataStreamIndexTemplate(CheckedFunction requestConsumer) throws Exception { + Request request = new Request("PUT", "_index_template/my-index-template"); + request.setJsonEntity(""" + { + "index_patterns": ["logs-*"], + "data_stream": {}, + "composed_of": ["my-template"], + "priority": 500 + }"""); + requestConsumer.apply(request); + } + + private static void createDataStreamDocuments(CheckedFunction requestConsumer) throws Exception { + Request request = new Request("POST", "logs-foo/_bulk"); + request.addParameter("refresh", ""); + request.setJsonEntity(""" + { "create" : {} } + { "@timestamp": "2099-05-06T16:21:15.000Z", "data_stream": {"namespace": "16", "environment": "dev"} } + { "create" : {} } + { "@timestamp": "2001-05-06T16:21:15.000Z", "data_stream": {"namespace": "17", "environment": "prod"} } + """); + assertMap(entityAsMap(requestConsumer.apply(request)), matchesMap().extraOk().entry("errors", false)); + } + + private static void createDataStreamAlias(CheckedFunction requestConsumer) throws Exception { + Request request = new Request("PUT", "_alias"); + 
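+ // points alias-foo at the logs-foo data stream, giving the alias-scoped user (logs_foo_after_2021_alias) + // a target for the alias-based queries in doTestDataStreamsWithFlsAndDls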
request.setJsonEntity(""" + { + "actions": [ + { + "add": { + "index": "logs-foo", + "alias": "alias-foo" + } + } + ] + }"""); + assertMap(entityAsMap(requestConsumer.apply(request)), matchesMap().extraOk().entry("errors", false)); + } + + static void doTestDataStreamsWithFlsAndDls() throws IOException { + // DLS + MapMatcher twoResults = matchesMap().extraOk().entry("values", matchesList().item(matchesList().item(2))); + MapMatcher oneResult = matchesMap().extraOk().entry("values", matchesList().item(matchesList().item(1))); + assertMap( + entityAsMap(runESQLCommandAgainstQueryCluster("logs_foo_all", "FROM my_remote_cluster:logs-foo | STATS COUNT(*)")), + twoResults + ); + assertMap( + entityAsMap(runESQLCommandAgainstQueryCluster("logs_foo_16_only", "FROM my_remote_cluster:logs-foo | STATS COUNT(*)")), + oneResult + ); + assertMap( + entityAsMap(runESQLCommandAgainstQueryCluster("logs_foo_after_2021", "FROM my_remote_cluster:logs-foo | STATS COUNT(*)")), + oneResult + ); + assertMap( + entityAsMap( + runESQLCommandAgainstQueryCluster("logs_foo_after_2021_pattern", "FROM my_remote_cluster:logs-foo | STATS COUNT(*)") + ), + oneResult + ); + assertMap( + entityAsMap(runESQLCommandAgainstQueryCluster("logs_foo_all", "FROM my_remote_cluster:logs-* | STATS COUNT(*)")), + twoResults + ); + assertMap( + entityAsMap(runESQLCommandAgainstQueryCluster("logs_foo_16_only", "FROM my_remote_cluster:logs-* | STATS COUNT(*)")), + oneResult + ); + assertMap( + entityAsMap(runESQLCommandAgainstQueryCluster("logs_foo_after_2021", "FROM my_remote_cluster:logs-* | STATS COUNT(*)")), + oneResult + ); + assertMap( + entityAsMap(runESQLCommandAgainstQueryCluster("logs_foo_after_2021_pattern", "FROM my_remote_cluster:logs-* | STATS COUNT(*)")), + oneResult + ); + + assertMap( + entityAsMap( + runESQLCommandAgainstQueryCluster("logs_foo_after_2021_alias", "FROM my_remote_cluster:alias-foo | STATS COUNT(*)") + ), + oneResult + ); + assertMap( + entityAsMap(runESQLCommandAgainstQueryCluster("logs_foo_after_2021_alias", "FROM my_remote_cluster:alias-* | STATS COUNT(*)")), + oneResult + ); + + // FLS + // logs_foo_all does not have FLS restrictions so should be able to access all fields + assertMap( + entityAsMap( + runESQLCommandAgainstQueryCluster("logs_foo_all", "FROM my_remote_cluster:logs-foo | SORT data_stream.namespace | LIMIT 1") + ), + matchesMap().extraOk() + .entry( + "columns", + List.of( + matchesMap().entry("name", "@timestamp").entry("type", "date"), + matchesMap().entry("name", "data_stream.environment").entry("type", "keyword"), + matchesMap().entry("name", "data_stream.namespace").entry("type", "keyword") + ) + ) + ); + assertMap( + entityAsMap( + runESQLCommandAgainstQueryCluster("logs_foo_all", "FROM my_remote_cluster:logs-* | SORT data_stream.namespace | LIMIT 1") + ), + matchesMap().extraOk() + .entry( + "columns", + List.of( + matchesMap().entry("name", "@timestamp").entry("type", "date"), + matchesMap().entry("name", "data_stream.environment").entry("type", "keyword"), + matchesMap().entry("name", "data_stream.namespace").entry("type", "keyword") + ) + ) + ); + + assertMap( + entityAsMap( + runESQLCommandAgainstQueryCluster( + "fls_user_logs_pattern", + "FROM my_remote_cluster:logs-foo | SORT data_stream.namespace | LIMIT 1" + ) + ), + matchesMap().extraOk() + .entry( + "columns", + List.of( + matchesMap().entry("name", "@timestamp").entry("type", "date"), + matchesMap().entry("name", "data_stream.namespace").entry("type", "keyword") + ) + ) + ); + assertMap( + entityAsMap( + 
runESQLCommandAgainstQueryCluster( + "fls_user_logs_pattern", + "FROM my_remote_cluster:logs-* | SORT data_stream.namespace | LIMIT 1" + ) + ), + matchesMap().extraOk() + .entry( + "columns", + List.of( + matchesMap().entry("name", "@timestamp").entry("type", "date"), + matchesMap().entry("name", "data_stream.namespace").entry("type", "keyword") + ) + ) + ); + } +} diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityDataStreamEsqlRcs2IT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityDataStreamEsqlRcs2IT.java new file mode 100644 index 0000000000000..c5cf704177020 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityDataStreamEsqlRcs2IT.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.remotecluster; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.junit.RunnableTestRuleAdapter; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.xpack.remotecluster.RemoteClusterSecurityDataStreamEsqlRcs1IT.createDataStreamOnFulfillingCluster; +import static org.elasticsearch.xpack.remotecluster.RemoteClusterSecurityDataStreamEsqlRcs1IT.createUserAndRoleOnQueryCluster; +import static org.elasticsearch.xpack.remotecluster.RemoteClusterSecurityDataStreamEsqlRcs1IT.doTestDataStreamsWithFlsAndDls; + +// TODO consolidate me with RemoteClusterSecurityDataStreamEsqlRcs1IT +public class RemoteClusterSecurityDataStreamEsqlRcs2IT extends AbstractRemoteClusterSecurityTestCase { + private static final AtomicReference<Map<String, Object>> API_KEY_MAP_REF = new AtomicReference<>(); + private static final AtomicBoolean SSL_ENABLED_REF = new AtomicBoolean(); + private static final AtomicBoolean NODE1_RCS_SERVER_ENABLED = new AtomicBoolean(); + private static final AtomicBoolean NODE2_RCS_SERVER_ENABLED = new AtomicBoolean(); + + static { + fulfillingCluster = ElasticsearchCluster.local() + .name("fulfilling-cluster") + .nodes(3) + .module("x-pack-autoscaling") + .module("x-pack-esql") + .module("x-pack-enrich") + .module("x-pack-ml") + .module("x-pack-ilm") + .module("ingest-common") + .apply(commonClusterConfig) + .setting("remote_cluster.port", "0") + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_server.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key") + .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt") + .setting("xpack.security.authc.token.enabled", "true") + .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password") + .node(0, spec -> spec.setting("remote_cluster_server.enabled", "true")) + .node(1, spec -> spec.setting("remote_cluster_server.enabled", () -> 
String.valueOf(NODE1_RCS_SERVER_ENABLED.get()))) + .node(2, spec -> spec.setting("remote_cluster_server.enabled", () -> String.valueOf(NODE2_RCS_SERVER_ENABLED.get()))) + .build(); + + queryCluster = ElasticsearchCluster.local() + .name("query-cluster") + .module("x-pack-autoscaling") + .module("x-pack-esql") + .module("x-pack-enrich") + .module("x-pack-ml") + .module("x-pack-ilm") + .module("ingest-common") + .apply(commonClusterConfig) + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_client.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt") + .setting("xpack.security.authc.token.enabled", "true") + .keystore("cluster.remote.my_remote_cluster.credentials", () -> { + if (API_KEY_MAP_REF.get() == null) { + final Map<String, Object> apiKeyMap = createCrossClusterAccessApiKey(""" + { + "search": [ + { + "names": ["logs-*", "alias-*"] + } + ] + }"""); + API_KEY_MAP_REF.set(apiKeyMap); + } + return (String) API_KEY_MAP_REF.get().get("encoded"); + }) + .rolesFile(Resource.fromClasspath("roles.yml")) + .user("logs_foo_all", "x-pack-test-password", "logs_foo_all", false) + .user("logs_foo_16_only", "x-pack-test-password", "logs_foo_16_only", false) + .user("logs_foo_after_2021", "x-pack-test-password", "logs_foo_after_2021", false) + .user("logs_foo_after_2021_pattern", "x-pack-test-password", "logs_foo_after_2021_pattern", false) + .user("logs_foo_after_2021_alias", "x-pack-test-password", "logs_foo_after_2021_alias", false) + .build(); + } + + @ClassRule + // Use a RuleChain to ensure that fulfilling cluster is started before query cluster + // `SSL_ENABLED_REF` is used to control the SSL-enabled setting on the test clusters + // We set it here, since randomization methods are not available in the static initialize context above + public static TestRule clusterRule = RuleChain.outerRule(new RunnableTestRuleAdapter(() -> { + SSL_ENABLED_REF.set(usually()); + NODE1_RCS_SERVER_ENABLED.set(randomBoolean()); + NODE2_RCS_SERVER_ENABLED.set(randomBoolean()); + })).around(fulfillingCluster).around(queryCluster); + + public void testDataStreamsWithDlsAndFls() throws Exception { + configureRemoteCluster(); + createDataStreamOnFulfillingCluster(); + setupAdditionalUsersAndRoles(); + + doTestDataStreamsWithFlsAndDls(); + } + + private void setupAdditionalUsersAndRoles() throws IOException { + createUserAndRoleOnQueryCluster("fls_user_logs_pattern", "fls_user_logs_pattern", """ + { + "indices": [{"names": [""], "privileges": ["read"]}], + "remote_indices": [ + { + "names": ["logs-*"], + "privileges": ["read"], + "field_security": { + "grant": ["@timestamp", "data_stream.namespace"] + }, + "clusters": ["*"] + } + ] + }"""); + } +} diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index 09449f81121fd..d6bad85161fd9 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.remotecluster; +import org.apache.http.client.methods.HttpGet; import org.elasticsearch.Build; import 
org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -22,6 +23,7 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.junit.RunnableTestRuleAdapter; +import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.After; @@ -34,6 +36,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Base64; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -51,6 +54,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.not; public class RemoteClusterSecurityEsqlIT extends AbstractRemoteClusterSecurityTestCase { private static final AtomicReference<Map<String, Object>> API_KEY_MAP_REF = new AtomicReference<>(); @@ -342,6 +346,14 @@ public void testCrossClusterQuery() throws Exception { configureRemoteCluster(); populateData(); + Map<String, Object> esqlCcsLicenseFeatureUsage = fetchEsqlCcsFeatureUsageFromNode(client()); + + Object ccsLastUsedTimestampAtStartOfTest = null; + if (esqlCcsLicenseFeatureUsage.isEmpty() == false) { + // some test runs will have a usage value already, so capture that to compare at end of test + ccsLastUsedTimestampAtStartOfTest = esqlCcsLicenseFeatureUsage.get("last_used"); + } + // query remote cluster only Request request = esqlRequest(""" FROM my_remote_cluster:employees @@ -385,6 +397,15 @@ public void testCrossClusterQuery() throws Exception { | LIMIT 2 | KEEP emp_id, department""")); assertRemoteOnlyAgainst2IndexResults(response); + + // check that the esql-ccs license feature is now present and that the last_used field has been updated + esqlCcsLicenseFeatureUsage = fetchEsqlCcsFeatureUsageFromNode(client()); + assertThat(esqlCcsLicenseFeatureUsage.size(), equalTo(5)); + Object lastUsed = esqlCcsLicenseFeatureUsage.get("last_used"); + assertNotNull("lastUsed should not be null", lastUsed); + if (ccsLastUsedTimestampAtStartOfTest != null) { + assertThat(lastUsed.toString(), not(equalTo(ccsLastUsedTimestampAtStartOfTest.toString()))); + } } @SuppressWarnings("unchecked") @@ -1660,4 +1681,18 @@ void assertExpectedClustersForMissingIndicesTests(Map responseMa assertThat((int) shards.get("failed"), is(0)); } } + + private static Map<String, Object> fetchEsqlCcsFeatureUsageFromNode(RestClient client) throws IOException { + Request request = new Request(HttpGet.METHOD_NAME, "_license/feature_usage"); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", basicAuthHeaderValue(USER, PASS))); + Response response = client.performRequest(request); + ObjectPath path = ObjectPath.createFromResponse(response); + List<Map<String, Object>> features = path.evaluate("features"); + for (var feature : features) { + if ("esql-ccs".equals(feature.get("name"))) { + return feature; + } + } + return Collections.emptyMap(); + } } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/resources/roles.yml b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/resources/roles.yml index b61daa068ed1a..c09f9dc620a4c 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/resources/roles.yml +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/resources/roles.yml @@ -41,3 +41,102 @@ ccr_user_role: manage_role: cluster: [ 'manage' ] + 
+logs_foo_all: + cluster: [] + indices: + - names: [ 'logs-foo' ] + privileges: [ 'read' ] + remote_indices: + - names: [ 'logs-foo' ] + clusters: [ '*' ] + privileges: [ 'read' ] + +logs_foo_16_only: + cluster: [] + indices: + - names: [ 'logs-foo' ] + privileges: [ 'read' ] + query: | + { + "term": { + "data_stream.namespace": "16" + } + } + remote_indices: + - names: [ 'logs-foo' ] + clusters: [ '*' ] + privileges: [ 'read' ] + query: | + { + "term": { + "data_stream.namespace": "16" + } + } + +logs_foo_after_2021: + cluster: [] + indices: + - names: [ 'logs-foo' ] + privileges: [ 'read' ] + query: | + { + "range": { + "@timestamp": {"gte": "2021-01-01T00:00:00"} + } + } + remote_indices: + - names: [ 'logs-foo' ] + clusters: [ '*' ] + privileges: [ 'read' ] + query: | + { + "range": { + "@timestamp": {"gte": "2021-01-01T00:00:00"} + } + } + +logs_foo_after_2021_pattern: + cluster: [] + indices: + - names: [ 'logs-*' ] + privileges: [ 'read' ] + query: | + { + "range": { + "@timestamp": {"gte": "2021-01-01T00:00:00"} + } + } + remote_indices: + - names: [ 'logs-*' ] + clusters: [ '*' ] + privileges: [ 'read' ] + query: | + { + "range": { + "@timestamp": {"gte": "2021-01-01T00:00:00"} + } + } + +logs_foo_after_2021_alias: + cluster: [] + indices: + - names: [ 'alias-foo' ] + privileges: [ 'read' ] + query: | + { + "range": { + "@timestamp": {"gte": "2021-01-01T00:00:00"} + } + } + remote_indices: + - names: [ 'alias-foo' ] + clusters: [ '*' ] + privileges: [ 'read' ] + query: | + { + "range": { + "@timestamp": {"gte": "2021-01-01T00:00:00"} + } + } + diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index de01d03284ea6..c07e4b2c541a2 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -387,6 +387,7 @@ public class Constants { "cluster:monitor/xpack/esql/stats/dist", "cluster:monitor/xpack/inference", "cluster:monitor/xpack/inference/get", + "cluster:monitor/xpack/inference/unified", "cluster:monitor/xpack/inference/diagnostics/get", "cluster:monitor/xpack/inference/services/get", "cluster:monitor/xpack/info", @@ -637,7 +638,9 @@ public class Constants { "internal:admin/indices/prevalidate_shard_path", "internal:index/metadata/migration_version/update", new FeatureFlag("reindex_data_stream").isEnabled() ? "indices:admin/migration/reindex_status" : null, + new FeatureFlag("reindex_data_stream").isEnabled() ? "indices:admin/data_stream/index/reindex" : null, new FeatureFlag("reindex_data_stream").isEnabled() ? "indices:admin/data_stream/reindex" : null, + new FeatureFlag("reindex_data_stream").isEnabled() ? 
"indices:admin/data_stream/reindex_cancel" : null, "internal:admin/repository/verify", "internal:admin/repository/verify/coordinate" ).filter(Objects::nonNull).collect(Collectors.toUnmodifiableSet()); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java index d4375d15e6a6d..5c3bb849b4e0c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java @@ -14,13 +14,16 @@ import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import java.util.Map; +import java.util.Random; import static java.util.Collections.singletonMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; @@ -60,6 +63,14 @@ public String configUsersRoles() { return super.configUsersRoles() + "my_kibana_user:kibana_user\n" + "kibana_user:kibana_user"; } + @Override + protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builder builder) { + // Prevent INDEX_CHECK_ON_STARTUP as a random setting since it could result in indices being checked for corruption before opening. + // When corruption is detected, it will prevent the shard from being opened. This check is expensive in terms of CPU and memory + // usage and causes intermittent CI failures due to timeout. 
+ return super.setRandomIndexSettings(random, builder).put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), false); + } + public void testFieldMappings() throws Exception { final String index = "logstash-20-12-2015"; final String field = "foo"; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java index 4b8fbfd41acdf..f2a91cb1b8e4e 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java @@ -557,8 +557,11 @@ public void testSuggestProfilesWithHint() throws IOException { equalTo(profileHits4.subList(2, profileHits4.size())) ); + // Exclude profile for "*" space since that can match _all_ profiles, if the full name is a substring of "user" or the name of + // another profile + final List<Profile> nonWildcardProfiles = profiles.stream().filter(p -> false == p.user().fullName().endsWith("*")).toList(); // A record will not be included if name does not match even when it has matching hint - final Profile hintedProfile5 = randomFrom(profiles); + final Profile hintedProfile5 = randomFrom(nonWildcardProfiles); final List<Profile> profileHits5 = Arrays.stream( doSuggest( Set.of(), diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 1c773a6e3963f..b5b9d0282c5fc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -128,6 +128,19 @@ public class RBACEngine implements AuthorizationEngine { private static final String DELETE_SUB_REQUEST_REPLICA = TransportDeleteAction.NAME + "[r]"; private static final Logger logger = LogManager.getLogger(RBACEngine.class); + + private static final Set<String> SCROLL_RELATED_ACTIONS = Set.of( + TransportSearchScrollAction.TYPE.name(), + SearchTransportService.FETCH_ID_SCROLL_ACTION_NAME, + SearchTransportService.QUERY_FETCH_SCROLL_ACTION_NAME, + SearchTransportService.QUERY_SCROLL_ACTION_NAME, + SearchTransportService.FREE_CONTEXT_ACTION_NAME, + SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME, + TransportClearScrollAction.NAME, + "indices:data/read/sql/close_cursor", + SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME + ); + private final Settings settings; private final CompositeRolesStore rolesStore; private final FieldPermissionsCache fieldPermissionsCache; @@ -319,7 +332,7 @@ public void authorizeIndexAction( // need to validate that the action is allowed and then move on listener.onResponse(role.checkIndicesAction(action) ? IndexAuthorizationResult.EMPTY : IndexAuthorizationResult.DENIED); } else if (request instanceof IndicesRequest == false) { - if (isScrollRelatedAction(action)) { + if (SCROLL_RELATED_ACTIONS.contains(action)) { // scroll is special // some APIs are indices requests that are not actually associated with indices. 
For example, + // search scroll request, is categorized under the indices context, but doesn't hold indices names @@ -999,17 +1012,6 @@ public int hashCode() { } } - private static boolean isScrollRelatedAction(String action) { - return action.equals(TransportSearchScrollAction.TYPE.name()) - || action.equals(SearchTransportService.FETCH_ID_SCROLL_ACTION_NAME) - || action.equals(SearchTransportService.QUERY_FETCH_SCROLL_ACTION_NAME) - || action.equals(SearchTransportService.QUERY_SCROLL_ACTION_NAME) - || action.equals(SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME) - || action.equals(TransportClearScrollAction.NAME) - || action.equals("indices:data/read/sql/close_cursor") - || action.equals(SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME); - } - private static boolean isAsyncRelatedAction(String action) { return action.equals(SubmitAsyncSearchAction.NAME) || action.equals(GetAsyncSearchAction.NAME) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml index 932241da2c536..81f65668722fc 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml @@ -92,7 +92,7 @@ setup: - gt: {esql.functions.to_long: $functions_to_long} - match: {esql.functions.coalesce: $functions_coalesce} # Testing for the entire function set isn't feasible, so we just check that we return the correct count as an approximation. - - length: {esql.functions: 123} # check the "sister" test below for a likely update to the same esql.functions length check + - length: {esql.functions: 128} # check the "sister" test below for a likely update to the same esql.functions length check --- "Basic ESQL usage output (telemetry) non-snapshot version": @@ -163,4 +163,4 @@ setup: - match: {esql.functions.cos: $functions_cos} - gt: {esql.functions.to_long: $functions_to_long} - match: {esql.functions.coalesce: $functions_coalesce} - - length: {esql.functions: 119} # check the "sister" test above for a likely update to the same esql.functions length check + - length: {esql.functions: 124} # check the "sister" test above for a likely update to the same esql.functions length check diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/10_reindex.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/10_reindex.yml index f50a7a65f53d3..9fb33b43f042f 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/10_reindex.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/10_reindex.yml @@ -104,14 +104,23 @@ setup: name: my-data-stream - is_true: acknowledged -# Uncomment once the cancel API is in place -# - do: -# migrate.reindex: -# body: | -# { -# "mode": "upgrade", -# "source": { -# "index": "my-data-stream" -# } -# } -# - match: { task: "reindex-data-stream-my-data-stream" } + - do: + migrate.reindex: + body: | + { + "mode": "upgrade", + "source": { + "index": "my-data-stream" + } + } + - match: { acknowledged: true } + + - do: + migrate.cancel_reindex: + index: "my-data-stream" + - match: { acknowledged: true } + + - do: + catch: /resource_not_found_exception/ + migrate.cancel_reindex: + index: "my-data-stream" diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml 
index ae343a0b4db95..c94ce8dd211ae 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml @@ -46,25 +46,39 @@ setup: name: my-data-stream - is_true: acknowledged -# Uncomment once the cancel API is in place -# - do: -# migrate.reindex: -# body: | -# { -# "mode": "upgrade", -# "source": { -# "index": "my-data-stream" -# } -# } -# - match: { acknowledged: true } -# -# - do: -# migrate.get_reindex_status: -# index: "my-data-stream" -# - match: { complete: true } -# - match: { total_indices: 1 } -# - match: { total_indices_requiring_upgrade: 0 } -# - match: { successes: 0 } -# - match: { in_progress: 0 } -# - match: { pending: 0 } -# - match: { errors: [] } + - do: + migrate.reindex: + body: | + { + "mode": "upgrade", + "source": { + "index": "my-data-stream" + } + } + - match: { acknowledged: true } + + - do: + migrate.get_reindex_status: + index: "my-data-stream" + - match: { complete: true } + - match: { total_indices: 1 } + - match: { total_indices_requiring_upgrade: 0 } + - match: { successes: 0 } + - match: { in_progress: 0 } + - match: { pending: 0 } + - match: { errors: [] } + + - do: + migrate.cancel_reindex: + index: "my-data-stream" + - match: { acknowledged: true } + + - do: + catch: /resource_not_found_exception/ + migrate.cancel_reindex: + index: "my-data-stream" + + - do: + catch: /resource_not_found_exception/ + migrate.get_reindex_status: + index: "my-data-stream" diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java index 337d3c5820c07..24586e5f36337 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandler.java @@ -169,7 +169,14 @@ private void handleScriptException(ScriptException scriptException, boolean unat * @param numFailureRetries the number of configured retries */ private void handleBulkIndexingException(BulkIndexingException bulkIndexingException, boolean unattended, int numFailureRetries) { - if (unattended == false && bulkIndexingException.isIrrecoverable()) { + if (bulkIndexingException.getCause() instanceof ClusterBlockException) { + retryWithoutIncrementingFailureCount( + bulkIndexingException, + bulkIndexingException.getDetailedMessage(), + unattended, + numFailureRetries + ); + } else if (unattended == false && bulkIndexingException.isIrrecoverable()) { String message = TransformMessages.getMessage( TransformMessages.LOG_TRANSFORM_PIVOT_IRRECOVERABLE_BULK_INDEXING_ERROR, bulkIndexingException.getDetailedMessage() @@ -232,12 +239,46 @@ private void retry(Throwable unwrappedException, String message, boolean unatten && unwrappedException.getClass().equals(context.getLastFailure().getClass()); final int failureCount = context.incrementAndGetFailureCount(unwrappedException); - if (unattended == false && numFailureRetries != -1 && failureCount > numFailureRetries) { fail(unwrappedException, "task encountered more than " + numFailureRetries + " failures; latest failure: " + message); return; } + logRetry(unwrappedException, message, unattended, numFailureRetries, failureCount, repeatedFailure); + } + + /** + * Terminate failure handling without incrementing the 
retries used + * <p>
+ * This is used when there is an ongoing recoverable issue and we want to retain + * retries for any issues that may occur after the issue is resolved + * + * @param unwrappedException The exception caught + * @param message error message to log/audit + * @param unattended whether the transform runs in unattended mode + * @param numFailureRetries the number of configured retries + */ + private void retryWithoutIncrementingFailureCount( + Throwable unwrappedException, + String message, + boolean unattended, + int numFailureRetries + ) { + // group failures to decide whether to report it below + final boolean repeatedFailure = context.getLastFailure() != null + && unwrappedException.getClass().equals(context.getLastFailure().getClass()); + + logRetry(unwrappedException, message, unattended, numFailureRetries, context.getFailureCount(), repeatedFailure); + } + + private void logRetry( + Throwable unwrappedException, + String message, + boolean unattended, + int numFailureRetries, + int failureCount, + boolean repeatedFailure + ) { // Since our schedule fires again very quickly after failures it is possible to run into the same failure numerous // times in a row, very quickly. We do not want to spam the audit log with repeated failures, so only record the first one // and if the number of retries is about to exceed diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java index 84c8d4e140408..3894ff3043ccd 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformFailureHandlerTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; import org.elasticsearch.xpack.transform.notifications.MockTransformAuditor; +import java.util.List; import java.util.Map; import java.util.Set; @@ -63,9 +64,121 @@ public int getFailureCountChangedCounter() { } } - public void testUnattended() { + public void testHandleIndexerFailure_CircuitBreakingExceptionNewPageSizeLessThanMinimumPageSize() { + var e = new CircuitBreakingException(randomAlphaOfLength(10), 1, 0, randomFrom(CircuitBreaker.Durability.values())); + assertRetryIfUnattendedOtherwiseFail(e); + } + + public void testHandleIndexerFailure_CircuitBreakingExceptionNewPageSizeNotLessThanMinimumPageSize() { + var e = new CircuitBreakingException(randomAlphaOfLength(10), 1, 1, randomFrom(CircuitBreaker.Durability.values())); + + List.of(true, false).forEach((unattended) -> { assertNoFailureAndContextPageSizeSet(e, unattended, 365); }); + } + + public void testHandleIndexerFailure_ScriptException() { + var e = new ScriptException( + randomAlphaOfLength(10), + new ArithmeticException(randomAlphaOfLength(10)), + singletonList(randomAlphaOfLength(10)), + randomAlphaOfLength(10), + randomAlphaOfLength(10) + ); + assertRetryIfUnattendedOtherwiseFail(e); + } + + public void testHandleIndexerFailure_BulkIndexExceptionWrappingClusterBlockException() { + final BulkIndexingException bulkIndexingException = new BulkIndexingException( + randomAlphaOfLength(10), + new ClusterBlockException(Map.of("test-index", Set.of(MetadataIndexStateService.INDEX_CLOSED_BLOCK))), + randomBoolean() + ); + + List.of(true, false).forEach((unattended) -> { 
assertRetryFailureCountNotIncremented(bulkIndexingException, unattended); }); + } + + public void testHandleIndexerFailure_IrrecoverableBulkIndexException() { + final BulkIndexingException e = new BulkIndexingException( + randomAlphaOfLength(10), + new ElasticsearchStatusException(randomAlphaOfLength(10), RestStatus.INTERNAL_SERVER_ERROR), + true + ); + assertRetryIfUnattendedOtherwiseFail(e); + } + + public void testHandleIndexerFailure_RecoverableBulkIndexException() { + final BulkIndexingException bulkIndexingException = new BulkIndexingException( + randomAlphaOfLength(10), + new ElasticsearchStatusException(randomAlphaOfLength(10), RestStatus.INTERNAL_SERVER_ERROR), + false + ); + + List.of(true, false).forEach((unattended) -> { assertRetry(bulkIndexingException, unattended); }); + } + + public void testHandleIndexerFailure_ClusterBlockException() { + List.of(true, false).forEach((unattended) -> { + assertRetry( + new ClusterBlockException(Map.of(randomAlphaOfLength(10), Set.of(MetadataIndexStateService.INDEX_CLOSED_BLOCK))), + unattended + ); + }); + } + + public void testHandleIndexerFailure_SearchPhaseExecutionExceptionWithNoShardSearchFailures() { + List.of(true, false).forEach((unattended) -> { + assertRetry( + new SearchPhaseExecutionException(randomAlphaOfLength(10), randomAlphaOfLength(10), ShardSearchFailure.EMPTY_ARRAY), + unattended + ); + }); + } + + public void testHandleIndexerFailure_SearchPhaseExecutionExceptionWithShardSearchFailures() { + List.of(true, false).forEach((unattended) -> { + assertRetry( + new SearchPhaseExecutionException( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + new ShardSearchFailure[] { new ShardSearchFailure(new Exception()) } + ), + unattended + ); + }); + } + + public void testHandleIndexerFailure_RecoverableElasticsearchException() { + List.of(true, false).forEach((unattended) -> { + assertRetry(new ElasticsearchStatusException(randomAlphaOfLength(10), RestStatus.INTERNAL_SERVER_ERROR), unattended); + }); + } + + public void testHandleIndexerFailure_IrrecoverableElasticsearchException() { + var e = new ElasticsearchStatusException(randomAlphaOfLength(10), RestStatus.NOT_FOUND); + assertRetryIfUnattendedOtherwiseFail(e); + } + + public void testHandleIndexerFailure_IllegalArgumentException() { + var e = new IllegalArgumentException(randomAlphaOfLength(10)); + assertRetryIfUnattendedOtherwiseFail(e); + } + + public void testHandleIndexerFailure_UnexpectedException() { + List.of(true, false).forEach((unattended) -> { assertRetry(new Exception(), unattended); }); + } + + private void assertRetryIfUnattendedOtherwiseFail(Exception e) { + List.of(true, false).forEach((unattended) -> { + if (unattended) { + assertRetry(e, unattended); + } else { + assertFailure(e); + } + }); + } + + private void assertRetry(Exception e, boolean unattended) { String transformId = randomAlphaOfLength(10); - SettingsConfig settings = new SettingsConfig.Builder().setUnattended(true).build(); + SettingsConfig settings = new SettingsConfig.Builder().setNumFailureRetries(2).setUnattended(unattended).build(); MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); MockTransformContextListener contextListener = new MockTransformContextListener(); @@ -74,51 +187,33 @@ public void testUnattended() { TransformFailureHandler handler = new TransformFailureHandler(auditor, context, transformId); - handler.handleIndexerFailure( - new SearchPhaseExecutionException( - "query", - "Partial shards failure", - new ShardSearchFailure[] { - new 
ShardSearchFailure(new CircuitBreakingException("to much memory", 110, 100, CircuitBreaker.Durability.TRANSIENT)) } - ), - settings - ); + assertNoFailure(handler, e, contextListener, settings, true); + assertNoFailure(handler, e, contextListener, settings, true); + if (unattended) { + assertNoFailure(handler, e, contextListener, settings, true); + } else { + // fail after max retry attempts reached + assertFailure(handler, e, contextListener, settings, true); + } + } - // CBE isn't a failure, but it only affects page size(which we don't test here) - assertFalse(contextListener.getFailed()); - assertEquals(0, contextListener.getFailureCountChangedCounter()); + private void assertRetryFailureCountNotIncremented(Exception e, boolean unattended) { + String transformId = randomAlphaOfLength(10); + SettingsConfig settings = new SettingsConfig.Builder().setNumFailureRetries(2).setUnattended(unattended).build(); - assertNoFailure( - handler, - new SearchPhaseExecutionException( - "query", - "Partial shards failure", - new ShardSearchFailure[] { - new ShardSearchFailure( - new ScriptException( - "runtime error", - new ArithmeticException("/ by zero"), - singletonList("stack"), - "test", - "painless" - ) - ) } - ), - contextListener, - settings - ); - assertNoFailure( - handler, - new ElasticsearchStatusException("something really bad happened", RestStatus.INTERNAL_SERVER_ERROR), - contextListener, - settings - ); - assertNoFailure(handler, new IllegalArgumentException("expected apples not oranges"), contextListener, settings); - assertNoFailure(handler, new RuntimeException("the s*** hit the fan"), contextListener, settings); - assertNoFailure(handler, new NullPointerException("NPE"), contextListener, settings); + MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); + MockTransformContextListener contextListener = new MockTransformContextListener(); + TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + context.setPageSize(500); + + TransformFailureHandler handler = new TransformFailureHandler(auditor, context, transformId); + + assertNoFailure(handler, e, contextListener, settings, false); + assertNoFailure(handler, e, contextListener, settings, false); + assertNoFailure(handler, e, contextListener, settings, false); } - public void testClusterBlock() { + private void assertFailure(Exception e) { String transformId = randomAlphaOfLength(10); SettingsConfig settings = new SettingsConfig.Builder().setNumFailureRetries(2).build(); @@ -129,32 +224,50 @@ public void testClusterBlock() { TransformFailureHandler handler = new TransformFailureHandler(auditor, context, transformId); - final ClusterBlockException clusterBlock = new ClusterBlockException( - Map.of("test-index", Set.of(MetadataIndexStateService.INDEX_CLOSED_BLOCK)) - ); + assertFailure(handler, e, contextListener, settings, false); + } - handler.handleIndexerFailure(clusterBlock, settings); - assertFalse(contextListener.getFailed()); - assertEquals(1, contextListener.getFailureCountChangedCounter()); + private void assertNoFailure( + TransformFailureHandler handler, + Exception e, + MockTransformContextListener mockTransformContextListener, + SettingsConfig settings, + boolean failureCountIncremented + ) { + handler.handleIndexerFailure(e, settings); + assertFalse(mockTransformContextListener.getFailed()); + assertEquals(failureCountIncremented ? 
1 : 0, mockTransformContextListener.getFailureCountChangedCounter()); + mockTransformContextListener.reset(); + } - handler.handleIndexerFailure(clusterBlock, settings); - assertFalse(contextListener.getFailed()); - assertEquals(2, contextListener.getFailureCountChangedCounter()); + private void assertNoFailureAndContextPageSizeSet(Exception e, boolean unattended, int newPageSize) { + String transformId = randomAlphaOfLength(10); + SettingsConfig settings = new SettingsConfig.Builder().setNumFailureRetries(2).setUnattended(unattended).build(); - handler.handleIndexerFailure(clusterBlock, settings); - assertTrue(contextListener.getFailed()); - assertEquals(3, contextListener.getFailureCountChangedCounter()); + MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); + MockTransformContextListener contextListener = new MockTransformContextListener(); + TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + context.setPageSize(500); + + TransformFailureHandler handler = new TransformFailureHandler(auditor, context, transformId); + + handler.handleIndexerFailure(e, settings); + assertFalse(contextListener.getFailed()); + assertEquals(0, contextListener.getFailureCountChangedCounter()); + assertEquals(newPageSize, context.getPageSize()); + contextListener.reset(); } - private void assertNoFailure( + private void assertFailure( TransformFailureHandler handler, Exception e, MockTransformContextListener mockTransformContextListener, - SettingsConfig settings + SettingsConfig settings, + boolean failureCountChanged ) { handler.handleIndexerFailure(e, settings); - assertFalse(mockTransformContextListener.getFailed()); - assertEquals(1, mockTransformContextListener.getFailureCountChangedCounter()); + assertTrue(mockTransformContextListener.getFailed()); + assertEquals(failureCountChanged ? 
1 : 0, mockTransformContextListener.getFailureCountChangedCounter()); mockTransformContextListener.reset(); } diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml index 4c0bbfd7ec139..1b435c551fbe9 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml @@ -86,11 +86,12 @@ teardown: ignore: 404 --- -"Index data and search on the mixed cluster": +"ES|QL cross-cluster query fails with basic license": - skip: features: allowed_warnings - do: + catch: bad_request allowed_warnings: - "Line 1:21: Square brackets '[]' need to be removed in FROM METADATA declaration" headers: { Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" } @@ -98,23 +99,11 @@ teardown: body: query: 'FROM *:esql*,esql_* | STATS total = sum(cost) by tag | SORT tag | LIMIT 10' - - match: {columns.0.name: "total"} - - match: {columns.0.type: "long"} - - match: {columns.1.name: "tag"} - - match: {columns.1.type: "keyword"} - - - match: {values.0.0: 2200} - - match: {values.0.1: "computer"} - - match: {values.1.0: 170} - - match: {values.1.1: "headphone"} - - match: {values.2.0: 2100 } - - match: {values.2.1: "laptop" } - - match: {values.3.0: 1000 } - - match: {values.3.1: "monitor" } - - match: {values.4.0: 550 } - - match: {values.4.1: "tablet" } + - match: { error.type: "status_exception" } + - match: { error.reason: "A valid Enterprise license is required to run ES|QL cross-cluster searches. License found: active basic license" } - do: + catch: bad_request allowed_warnings: - "Line 1:21: Square brackets '[]' need to be removed in FROM METADATA declaration" headers: { Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" } @@ -128,28 +117,11 @@ teardown: lte: "2023-01-03" format: "yyyy-MM-dd" - - match: {columns.0.name: "_index"} - - match: {columns.0.type: "keyword"} - - match: {columns.1.name: "tag"} - - match: {columns.1.type: "keyword"} - - match: {columns.2.name: "cost" } - - match: {columns.2.type: "long" } - - - match: {values.0.0: "esql_local"} - - match: {values.0.1: "monitor"} - - match: {values.0.2: 250 } - - match: {values.1.0: "my_remote_cluster:esql_index" } - - match: {values.1.1: "tablet"} - - match: {values.1.2: 450 } - - match: {values.2.0: "my_remote_cluster:esql_index" } - - match: {values.2.1: "computer" } - - match: {values.2.2: 1200 } - - match: {values.3.0: "esql_local"} - - match: {values.3.1: "laptop" } - - match: {values.3.2: 2100 } + - match: { error.type: "status_exception" } + - match: { error.reason: "A valid Enterprise license is required to run ES|QL cross-cluster searches. 
License found: active basic license" } --- -"Enrich across clusters": +"ES|QL enrich query across clusters fails with basic license": - requires: cluster_features: ["gte_v8.13.0"] reason: "Enrich across clusters available in 8.13 or later" @@ -194,27 +166,14 @@ teardown: index: suggestions - do: + catch: bad_request headers: { Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" } esql.query: body: query: 'FROM *:esql*,esql_* | STATS total = sum(cost) by tag | SORT total DESC | LIMIT 3 | ENRICH suggestions | KEEP tag, total, phrase' - - match: {columns.0.name: "tag"} - - match: {columns.0.type: "keyword"} - - match: {columns.1.name: "total" } - - match: {columns.1.type: "long" } - - match: {columns.2.name: "phrase" } - - match: {columns.2.type: "keyword" } - - - match: {values.0.0: "computer"} - - match: {values.0.1: 2200} - - match: {values.0.2: "best desktop for programming"} - - match: {values.1.0: "laptop"} - - match: {values.1.1: 2100 } - - match: {values.1.2: "the best battery life laptop"} - - match: {values.2.0: "monitor" } - - match: {values.2.1: 1000 } - - match: {values.2.2: "4k or 5k or 6K monitor?" } + - match: { error.type: "status_exception" } + - match: { error.reason: "A valid Enterprise license is required to run ES|QL cross-cluster searches. License found: active basic license" } - do: enrich.delete_policy: diff --git a/x-pack/test/smb-fixture/build.gradle b/x-pack/test/smb-fixture/build.gradle index aeb5626ce9508..a982259edb2dd 100644 --- a/x-pack/test/smb-fixture/build.gradle +++ b/x-pack/test/smb-fixture/build.gradle @@ -2,6 +2,8 @@ apply plugin: 'elasticsearch.java' apply plugin: 'elasticsearch.cache-test-fixtures' dependencies { + implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + api project(':test:fixtures:testcontainer-utils') api "junit:junit:${versions.junit}" api "org.testcontainers:testcontainers:${versions.testcontainer}" diff --git a/x-pack/test/smb-fixture/src/main/java/org/elasticsearch/test/fixtures/smb/SmbTestContainer.java b/x-pack/test/smb-fixture/src/main/java/org/elasticsearch/test/fixtures/smb/SmbTestContainer.java index 10f589e4e1df3..27d8257f4be10 100644 --- a/x-pack/test/smb-fixture/src/main/java/org/elasticsearch/test/fixtures/smb/SmbTestContainer.java +++ b/x-pack/test/smb-fixture/src/main/java/org/elasticsearch/test/fixtures/smb/SmbTestContainer.java @@ -7,12 +7,18 @@ package org.elasticsearch.test.fixtures.smb; +import com.github.dockerjava.api.model.Capability; + import org.elasticsearch.test.fixtures.testcontainers.DockerEnvironmentAwareTestContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.containers.wait.strategy.WaitAllStrategy; import org.testcontainers.images.builder.ImageFromDockerfile; +import java.time.Duration; + public final class SmbTestContainer extends DockerEnvironmentAwareTestContainer { - private static final String DOCKER_BASE_IMAGE = "ubuntu:16.04"; + private static final String DOCKER_BASE_IMAGE = "ubuntu:24.04"; public static final int AD_LDAP_PORT = 636; public static final int AD_LDAP_GC_PORT = 3269; @@ -20,15 +26,15 @@ public SmbTestContainer() { super( new ImageFromDockerfile("es-smb-fixture").withDockerfileFromBuilder( builder -> builder.from(DOCKER_BASE_IMAGE) - .run("apt-get update -qqy && apt-get install -qqy samba ldap-utils") + .env("TZ", "Etc/UTC") + .run("DEBIAN_FRONTEND=noninteractive apt-get update -qqy && apt-get install -qqy tzdata winbind samba ldap-utils") .copy("fixture/provision/installsmb.sh", 
"/fixture/provision/installsmb.sh") .copy("fixture/certs/ca.key", "/fixture/certs/ca.key") .copy("fixture/certs/ca.pem", "/fixture/certs/ca.pem") .copy("fixture/certs/cert.pem", "/fixture/certs/cert.pem") .copy("fixture/certs/key.pem", "/fixture/certs/key.pem") .run("chmod +x /fixture/provision/installsmb.sh") - .run("/fixture/provision/installsmb.sh") - .cmd("service samba-ad-dc restart && sleep infinity") + .cmd("/fixture/provision/installsmb.sh && service samba-ad-dc restart && echo Samba started && sleep infinity") .build() ) .withFileFromClasspath("fixture/provision/installsmb.sh", "/smb/provision/installsmb.sh") @@ -37,10 +43,20 @@ public SmbTestContainer() { .withFileFromClasspath("fixture/certs/cert.pem", "/smb/certs/cert.pem") .withFileFromClasspath("fixture/certs/key.pem", "/smb/certs/key.pem") ); - // addExposedPort(389); - // addExposedPort(3268); + addExposedPort(AD_LDAP_PORT); addExposedPort(AD_LDAP_GC_PORT); + + setWaitStrategy( + new WaitAllStrategy().withStartupTimeout(Duration.ofSeconds(120)) + .withStrategy(Wait.forLogMessage(".*Samba started.*", 1)) + .withStrategy(Wait.forListeningPort()) + ); + + getCreateContainerCmdModifiers().add(createContainerCmd -> { + createContainerCmd.getHostConfig().withCapAdd(Capability.SYS_ADMIN); + return createContainerCmd; + }); } public String getAdLdapUrl() { diff --git a/x-pack/test/smb-fixture/src/main/resources/smb/provision/installsmb.sh b/x-pack/test/smb-fixture/src/main/resources/smb/provision/installsmb.sh old mode 100644 new mode 100755 index 463238b9f50c2..fe939431bb435 --- a/x-pack/test/smb-fixture/src/main/resources/smb/provision/installsmb.sh +++ b/x-pack/test/smb-fixture/src/main/resources/smb/provision/installsmb.sh @@ -21,7 +21,7 @@ cat $SSL_DIR/ca.pem >> /etc/ssl/certs/ca-certificates.crt mv /etc/samba/smb.conf /etc/samba/smb.conf.orig -samba-tool domain provision --server-role=dc --use-rfc2307 --dns-backend=SAMBA_INTERNAL --realm=AD.TEST.ELASTICSEARCH.COM --domain=ADES --adminpass=Passw0rd --use-ntvfs +samba-tool domain provision --server-role=dc --use-rfc2307 --dns-backend=SAMBA_INTERNAL --realm=AD.TEST.ELASTICSEARCH.COM --domain=ADES --adminpass=Passw0rd cp /var/lib/samba/private/krb5.conf /etc/krb5.conf